Merge branch 'master' into feature/synced_flush

Conflicts:
	src/main/java/org/elasticsearch/index/engine/InternalEngine.java
	src/main/java/org/elasticsearch/index/shard/IndexShard.java
	src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
	src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
Simon Willnauer 2015-05-19 12:16:22 +02:00
commit 9d2852f0ab
270 changed files with 6535 additions and 6845 deletions

View File

@ -218,3 +218,6 @@ See the {client}/net-api/current/index.html[official Elasticsearch .NET client].
=== R
* https://github.com/Tomesch/elasticsearch[elasticsearch]:
R client for Elasticsearch
* https://github.com/ropensci/elastic[elastic]:
A general purpose R client for Elasticsearch

View File

@ -123,3 +123,26 @@ settings and filter the returned buckets based on a `min_doc_count` setting (by
bucket that matches documents and the last one are returned). This histogram also supports the `extended_bounds`
setting, which enables extending the bounds of the histogram beyond the data itself (to read more on why you'd want to
do that please refer to the explanation <<search-aggregations-bucket-histogram-aggregation-extended-bounds,here>>).
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"publish_date" : {
"datehistogram" : {
"field" : "publish_date",
"interval": "year",
"missing": "2000-01-01" <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `publish_date` field will fall into the same bucket as documents that have the value `2000-01-01`.

View File

@ -317,3 +317,26 @@ Response:
}
}
--------------------------------------------------
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"quantity" : {
"histogram" : {
"field" : "quantity",
"interval": 10,
"missing": 0 <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `quantity` field will fall into the same bucket as documents that have the value `0`.

View File

@ -655,3 +655,25 @@ in inner aggregations.
<1> experimental[] the possible values are `map`, `global_ordinals`, `global_ordinals_hash` and `global_ordinals_low_cardinality`
Please note that Elasticsearch will ignore this execution hint if it is not applicable and that there is no backward compatibility guarantee on these hints.
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"tags" : {
"terms" : {
"field" : "tags",
"missing": "N/A" <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `tags` field will fall into the same bucket as documents that have the value `N/A`.

View File

@ -72,4 +72,26 @@ It turned out that the exam was way above the level of the students and a grade
}
}
}
--------------------------------------------------
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"grade_avg" : {
"avg" : {
"field" : "grade",
"missing": 10 <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `10`.

View File

@ -155,3 +155,24 @@ however since hashes need to be computed on the fly.
TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory.
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"tag_cardinality" : {
"cardinality" : {
"field" : "tag",
"missing": "N/A" <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `tag` field will fall into the same bucket as documents that have the value `N/A`.

View File

@ -116,4 +116,26 @@ It turned out that the exam was way above the level of the students and a grade
}
}
}
--------------------------------------------------
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"grades_stats" : {
"extended_stats" : {
"field" : "grade",
"missing": 0 <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `0`.

View File

@ -67,3 +67,24 @@ Let's say that the prices of the documents in our index are in USD, but we would
}
--------------------------------------------------
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"grade_max" : {
"max" : {
"field" : "grade",
"missing": 10 <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `10`.

View File

@ -66,3 +66,25 @@ Let's say that the prices of the documents in our index are in USD, but we would
}
}
--------------------------------------------------
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"grade_min" : {
"min" : {
"field" : "grade",
"missing": 10 <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `10`.

View File

@ -190,3 +190,25 @@ A "node" uses roughly 32 bytes of memory, so under worst-case scenarios (large a
of data which arrives sorted and in-order) the default settings will produce a
TDigest roughly 64KB in size. In practice data tends to be more random and
the TDigest will use less memory.
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"grade_percentiles" : {
"percentiles" : {
"field" : "grade",
"missing": 10 <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `10`.

View File

@ -86,3 +86,25 @@ script to generate values which percentile ranks are calculated on
<2> Scripting supports parameterized input just like any other script
TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory.
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"grade_ranks" : {
"percentile_ranks" : {
"field" : "grade",
"missing": 10 <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `10`.

View File

@ -78,4 +78,26 @@ It turned out that the exam was way above the level of the students and a grade
}
}
}
--------------------------------------------------
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"grades_stats" : {
"stats" : {
"field" : "grade",
"missing": 0 <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `0`.

View File

@ -77,3 +77,25 @@ Computing the sum of squares over all stock tick changes:
}
}
--------------------------------------------------
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
{
"aggs" : {
"total_time" : {
"sum" : {
"field" : "took",
"missing": 100 <1>
}
}
}
}
--------------------------------------------------
<1> Documents without a value in the `took` field will fall into the same bucket as documents that have the value `100`.

View File

@ -7,16 +7,13 @@ via a regular expression. Accepts the following settings:
The following are settings that can be set for a `pattern` analyzer
type:
[cols="<,<",options="header",]
|===================================================================
|Setting |Description
|`lowercase` |Should terms be lowercased or not. Defaults to `true`.
|`pattern` |The regular expression pattern, defaults to `\W+`.
|`flags` |The regular expression flags.
|`stopwords` |A list of stopwords to initialize the stop filter with.
Defaults to an 'empty' stopword list Check
<<analysis-stop-analyzer,Stop Analyzer>> for more details.
|===================================================================
[horizontal]
`lowercase`:: Should terms be lowercased or not. Defaults to `true`.
`pattern`:: The regular expression pattern, defaults to `\W+`.
`flags`:: The regular expression flags.
`stopwords`:: A list of stopwords to initialize the stop filter with.
Defaults to an 'empty' stopword list. Check
<<analysis-stop-analyzer,Stop Analyzer>> for more details.
*IMPORTANT*: The regular expression should match the *token separators*,
not the tokens themselves.
@ -29,101 +26,103 @@ Pattern API] for more details about `flags` options.
==== Pattern Analyzer Examples
In order to try out these examples, you should delete the `test` index
before running each example:
[source,js]
--------------------------------------------------
curl -XDELETE localhost:9200/test
--------------------------------------------------
before running each example.
[float]
===== Whitespace tokenizer
[source,js]
--------------------------------------------------
curl -XPUT 'localhost:9200/test' -d '
{
"settings":{
"analysis": {
"analyzer": {
"whitespace":{
"type": "pattern",
"pattern":"\\\\s+"
}
}
}
}
}'
DELETE test
curl 'localhost:9200/test/_analyze?pretty=1&analyzer=whitespace' -d 'foo,bar baz'
# "foo,bar", "baz"
PUT /test
{
"settings": {
"analysis": {
"analyzer": {
"whitespace": {
"type": "pattern",
"pattern": "\\s+"
}
}
}
}
}
GET /test/_analyze?analyzer=whitespace&text=foo,bar baz
# "foo,bar", "baz"
--------------------------------------------------
// AUTOSENSE
[float]
===== Non-word character tokenizer
[source,js]
--------------------------------------------------
DELETE test
curl -XPUT 'localhost:9200/test' -d '
{
"settings":{
"analysis": {
"analyzer": {
"nonword":{
"type": "pattern",
"pattern":"[^\\\\w]+"
}
}
}
PUT /test
{
"settings": {
"analysis": {
"analyzer": {
"nonword": {
"type": "pattern",
"pattern": "[^\\w]+" <1>
}
}'
}
}
}
}
curl 'localhost:9200/test/_analyze?pretty=1&analyzer=nonword' -d 'foo,bar baz'
# "foo,bar baz" becomes "foo", "bar", "baz"
GET /test/_analyze?analyzer=nonword&text=foo,bar baz
# "foo,bar baz" becomes "foo", "bar", "baz"
curl 'localhost:9200/test/_analyze?pretty=1&analyzer=nonword' -d 'type_1-type_4'
# "type_1","type_4"
GET /test/_analyze?analyzer=nonword&text=type_1-type_4
# "type_1","type_4"
--------------------------------------------------
// AUTOSENSE
[float]
===== CamelCase tokenizer
[source,js]
--------------------------------------------------
DELETE test
curl -XPUT 'localhost:9200/test?pretty=1' -d '
{
"settings":{
"analysis": {
"analyzer": {
"camel":{
"type": "pattern",
"pattern":"([^\\\\p{L}\\\\d]+)|(?<=\\\\D)(?=\\\\d)|(?<=\\\\d)(?=\\\\D)|(?<=[\\\\p{L}&&[^\\\\p{Lu}]])(?=\\\\p{Lu})|(?<=\\\\p{Lu})(?=\\\\p{Lu}[\\\\p{L}&&[^\\\\p{Lu}]])"
}
}
}
PUT /test?pretty=1
{
"settings": {
"analysis": {
"analyzer": {
"camel": {
"type": "pattern",
"pattern": "([^\\p{L}\\d]+)|(?<=\\D)(?=\\d)|(?<=\\d)(?=\\D)|(?<=[\\p{L}&&[^\\p{Lu}]])(?=\\p{Lu})|(?<=\\p{Lu})(?=\\p{Lu}[\\p{L}&&[^\\p{Lu}]])"
}
}'
}
}
}
}
curl 'localhost:9200/test/_analyze?pretty=1&analyzer=camel' -d '
MooseX::FTPClass2_beta
'
# "moose","x","ftp","class","2","beta"
GET /test/_analyze?analyzer=camel&text=MooseX::FTPClass2_beta
# "moose","x","ftp","class","2","beta"
--------------------------------------------------
// AUTOSENSE
The regex above is easier to understand as:
[source,js]
--------------------------------------------------
([^\\p{L}\\d]+) # swallow non letters and numbers,
| (?<=\\D)(?=\\d) # or non-number followed by number,
| (?<=\\d)(?=\\D) # or number followed by non-number,
| (?<=[ \\p{L} && [^\\p{Lu}]]) # or lower case
(?=\\p{Lu}) # followed by upper case,
| (?<=\\p{Lu}) # or upper case
(?=\\p{Lu} # followed by upper case
[\\p{L}&&[^\\p{Lu}]] # then lower case
)
([^\p{L}\d]+) # swallow non letters and numbers,
| (?<=\D)(?=\d) # or non-number followed by number,
| (?<=\d)(?=\D) # or number followed by non-number,
| (?<=[ \p{L} && [^\p{Lu}]]) # or lower case
(?=\p{Lu}) # followed by upper case,
| (?<=\p{Lu}) # or upper case
(?=\p{Lu} # followed by upper case
[\p{L}&&[^\p{Lu}]] # then lower case
)
--------------------------------------------------

View File

@ -219,8 +219,8 @@ the maximum allowed Levenshtein Edit Distance (or number of edits)
--
generates an edit distance based on the length of the term. For lengths:
`0..1`:: must match exactly
`1..5`:: one edit allowed
`0..2`:: must match exactly
`3..5`:: one edit allowed
`>5`:: two edits allowed
`AUTO` should generally be the preferred value for `fuzziness`.
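To make the new length brackets concrete, here is a minimal, hypothetical Java sketch (not part of this commit) that simply spells out the rule above: the allowed Levenshtein edit distance grows with the term length.
[source,java]
--------------------------------------------------
// Hypothetical illustration of the AUTO fuzziness brackets documented above;
// this is not Elasticsearch code, only the length-to-edits rule made explicit.
public final class AutoFuzzinessSketch {

    static int allowedEdits(int termLength) {
        if (termLength <= 2) {
            return 0; // 0..2 characters: must match exactly
        } else if (termLength <= 5) {
            return 1; // 3..5 characters: one edit allowed
        }
        return 2;     // more than 5 characters: two edits allowed
    }

    public static void main(String[] args) {
        System.out.println(allowedEdits("ox".length()));      // 0
        System.out.println(allowedEdits("quick".length()));   // 1
        System.out.println(allowedEdits("jumping".length())); // 2
    }
}
--------------------------------------------------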

View File

@ -131,8 +131,6 @@ get operations |2
gets |0s
|`get.missing_total` |`gmto`, `getMissingTotal` |No |Number of failed
get operations |1
|`id_cache.memory_size` |`im`, `idCacheMemory` |No |Used ID cache
memory |216b
|`indexing.delete_current` |`idc`, `indexingDeleteCurrent` |No |Number
of current deletion operations |0
|`indexing.delete_time` |`idti`, `indexingDeleteTime` |No |Time spent in

View File

@ -61,10 +61,6 @@ Will return, for example:
"memory_size_in_bytes": 0,
"evictions": 0
},
"id_cache": {
"memory_size": "0b",
"memory_size_in_bytes": 0
},
"completion": {
"size": "0b",
"size_in_bytes": 0

View File

@ -18,6 +18,19 @@ curl -XGET 'localhost:9200/_analyze' -d '
coming[2.0.0, body based parameters were added in 2.0.0]
If the `text` parameter is provided as an array of strings, it is analyzed as a multi-valued field.
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_analyze' -d '
{
"analyzer" : "standard",
"text" : ["this is a test", "the second text"]
}'
--------------------------------------------------
coming[2.0.0, body based parameters were added in 2.0.0]
Or by building a custom transient analyzer out of tokenizers,
token filters and char filters. Token filters can use the shorter 'filters'
parameter name:

View File

@ -10,8 +10,7 @@ $ curl -XPOST 'http://localhost:9200/twitter/_cache/clear'
--------------------------------------------------
The API, by default, will clear all caches. Specific caches can be cleaned
explicitly by setting `filter`, `fielddata`, `query_cache`,
or `id_cache` to `true`.
explicitly by setting `filter`, `fielddata` or `query_cache`.
All caches relating to a specific field(s) can also be cleared by
specifying `fields` parameter with a comma delimited list of the

View File

@ -31,9 +31,7 @@ More information on how to define type mappings can be found in the
When an existing mapping already exists under the given type, the two
mapping definitions, the one already defined, and the new ones are
merged. The `ignore_conflicts` parameters can be used to control if
conflicts should be ignored or not, by default, it is set to `false`
which means conflicts are *not* ignored.
merged. If there are conflicts, the update will be rejected.
The definition of conflict is really dependent on the type merged, but
in general, if a different core type is defined, it is considered as a

View File

@ -177,6 +177,7 @@ A `RoutingMissingException` is now thrown instead.
* The setting `index.mapping.allow_type_wrapper` has been removed. Documents should always be sent without the type as the root element.
* The delete mappings API has been removed. Mapping types can no longer be deleted.
* The `ignore_conflicts` option of the put mappings API has been removed. Conflicts can't be ignored anymore.
==== Removed type prefix on field names in queries
Types can no longer be specified on fields within queries. Instead, specify type restrictions in the search request.
@ -498,7 +499,8 @@ systems and the provided start/stop scripts.
=== Analyze API
The Analyze API return 0 as first Token's position instead of 1.
* The Analyze API returns 0 as the first token's position instead of 1.
* The `text()` method on `AnalyzeRequest` now returns `String[]` instead of `String`.
=== Multiple data.path striping
@ -560,3 +562,48 @@ same search request will likely be off if `top_children` was used.
=== Removed file based index templates
Index templates can no longer be configured on disk. Use the `_template` API instead.
[float]
=== Removed `id_cache` from stats apis
Removed `id_cache` metric from nodes stats, indices stats and cluster stats apis. This metric has also been removed
from the shards cat, indices cat and nodes cat apis. Parent/child memory is now reported under fielddata, because it
has internally been using fielddata for a while now.
To see how much memory parent/child related field data is taking, the `fielddata_fields` option can be used on the stats
apis. Indices stats example:
[source,js]
--------------------------------------------------
curl -XGET "http://localhost:9200/_stats/fielddata?pretty&human&fielddata_fields=_parent"
--------------------------------------------------
Parent/child has been using field data for the `_parent` field since version `1.1.0`, but the memory stats for the `_parent`
field were still shown under the `id_cache` metric in the stats apis for backwards compatibility reasons between 1.x versions.
Before version `1.1.0`, parent/child had its own in-memory data structures for id values in the `_parent` field.
[float]
=== Removed `id_cache` from clear cache api
Removed the `id_cache` option from the clear cache apis. The `fielddata` option should be used to clear the `_parent` field
from fielddata.
[float]
=== Highlighting
The default value for the `require_field_match` option is `true` rather than
`false`, meaning that the highlighters will take the fields that were queried
into account by default. That means for instance that highlighting any field
when querying the `_all` field will produce no highlighted snippets by default,
given that the match was on the `_all` field only. Querying the same fields
that need to be highlighted is the cleaner solution to get highlighted snippets
back. Otherwise, the `require_field_match` option can be set to `false` to ignore
field names completely when highlighting.
The postings highlighter doesn't support the `require_field_match` option
anymore; it will only highlight fields that were queried.
The `match` query with type set to `match_phrase_prefix` is not supported by the
postings highlighter. No highlighted snippets will be returned.
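As a rough illustration of the new default (a hypothetical sketch with made-up names, not Elasticsearch code): with `require_field_match` left at `true`, a field only yields snippets when the query actually matched that field.
[source,java]
--------------------------------------------------
import java.util.Collections;
import java.util.Set;

// Hypothetical sketch of the behaviour described above; the method and its
// callers are invented for illustration and do not exist in Elasticsearch.
public final class RequireFieldMatchSketch {

    static boolean shouldHighlight(String field, Set<String> queriedFields, boolean requireFieldMatch) {
        return !requireFieldMatch || queriedFields.contains(field);
    }

    public static void main(String[] args) {
        Set<String> queried = Collections.singleton("_all"); // the match was on _all only
        System.out.println(shouldHighlight("title", queried, true));  // false: no snippets by default
        System.out.println(shouldHighlight("title", queried, false)); // true: field names ignored
    }
}
--------------------------------------------------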

View File

@ -82,11 +82,11 @@ Additionally, every child document is mapped to its parent using a long
value (approximately). It is advisable to keep the string parent ID short
in order to reduce memory usage.
You can check how much memory is being used by the ID cache using the
<<indices-stats,indices stats>> or <<cluster-nodes-stats,nodes stats>>
You can check how much memory is being used by the `_parent` field in the fielddata cache
using the <<indices-stats,indices stats>> or <<cluster-nodes-stats,nodes stats>>
APIs, e.g.:
[source,js]
--------------------------------------------------
curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human"
curl -XGET "http://localhost:9200/_stats/fielddata?pretty&human&fielddata_fields=_parent"
--------------------------------------------------

View File

@ -57,13 +57,13 @@ Additionally, every child document is mapped to its parent using a long
value (approximately). It is advisable to keep the string parent ID short
in order to reduce memory usage.
You can check how much memory is being used by the ID cache using the
<<indices-stats,indices stats>> or <<cluster-nodes-stats,nodes stats>>
You can check how much memory is being used by the `_parent` field in the fielddata cache
using the <<indices-stats,indices stats>> or <<cluster-nodes-stats,nodes stats>>
APIs, e.g.:
[source,js]
--------------------------------------------------
curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human"
curl -XGET "http://localhost:9200/_stats/fielddata?pretty&human&fielddata_fields=_parent"
--------------------------------------------------

View File

@ -72,11 +72,9 @@ that the query is composed of, regardless of whether they are actually part of
a query match, effectively ignoring their positions.
[WARNING]
The postings highlighter does support highlighting of multi term queries, like
prefix queries, wildcard queries and so on. On the other hand, this requires
the queries to be rewritten using a proper
<<query-dsl-multi-term-rewrite,rewrite method>> that supports multi term
extraction, which is a potentially expensive operation.
The postings highlighter doesn't support highlighting some complex queries,
like a `match` query with `type` set to `match_phrase_prefix`. No highlighted
snippets will be returned in that case.
[[fast-vector-highlighter]]
==== Fast vector highlighter
@ -406,10 +404,10 @@ at the field level.
[[field-match]]
==== Require Field Match
`require_field_match` can be set to `true` which will cause a field to
be highlighted only if a query matched that field. `false` means that
terms are highlighted on all requested fields regardless if the query
matches specifically on them.
`require_field_match` can be set to `false`, which will cause any field to
be highlighted regardless of whether the query matched specifically on it.
The default behaviour is `true`, meaning that only fields that hold a query
match will be highlighted.
[[boundary-characters]]
==== Boundary Characters

13
pom.xml
View File

@ -32,7 +32,7 @@
<properties>
<lucene.version>5.2.0</lucene.version>
<lucene.snapshot.revision>1678978</lucene.snapshot.revision>
<lucene.snapshot.revision>1680200</lucene.snapshot.revision>
<lucene.maven.version>5.2.0-snapshot-${lucene.snapshot.revision}</lucene.maven.version>
<testframework.version>2.1.14</testframework.version>
<tests.jvms>auto</tests.jvms>
@ -229,13 +229,6 @@
<version>0.7.1</version>
</dependency>
<dependency> <!-- ES uses byte* hashes -->
<groupId>com.carrotsearch</groupId>
<artifactId>hppc</artifactId>
<version>0.7.1</version>
<classifier>esoteric</classifier>
</dependency>
<dependency>
<groupId>joda-time</groupId>
<artifactId>joda-time</artifactId>
@ -794,8 +787,6 @@
<!-- unit tests for test framework classes-->
<exclude>org/elasticsearch/test/test/**/*</exclude>
</excludes>
<!-- Resources are large and not really helpful as "test sources". -->
<excludeResources>true</excludeResources>
</configuration>
</execution>
</executions>
@ -1526,6 +1517,7 @@
<signaturesFile>dev-tools/forbidden/all-signatures.txt</signaturesFile>
</signaturesFiles>
<signatures>${forbidden.test.signatures}</signatures>
<suppressAnnotations><annotation>**.SuppressForbidden</annotation></suppressAnnotations>
</configuration>
<phase>test-compile</phase>
<goals>
@ -1546,6 +1538,7 @@
</goals>
<configuration>
<includes>
<include>rest-api-spec/**/*</include>
<include>org/elasticsearch/test/**/*</include>
<include>org/elasticsearch/bootstrap/BootstrapForTesting.class</include>
<include>org/elasticsearch/common/cli/CliToolTestCase.class</include>

View File

@ -37,7 +37,7 @@
"description" : "With `true`, specify that a local shard should be used if available, with `false`, use a random shard (default: true)"
},
"text": {
"type" : "string",
"type" : "list",
"description" : "The text on which the analysis should be performed (when request body is not used)"
},
"tokenizer": {

View File

@ -32,14 +32,6 @@
"type" : "boolean",
"description" : "Clear filter caches"
},
"id": {
"type" : "boolean",
"description" : "Clear ID caches for parent/child"
},
"id_cache": {
"type" : "boolean",
"description" : "Clear ID caches for parent/child"
},
"ignore_unavailable": {
"type" : "boolean",
"description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"

View File

@ -17,10 +17,6 @@
}
},
"params": {
"ignore_conflicts": {
"type" : "boolean",
"description" : "Specify whether to ignore conflicts while updating the mapping (default: false)"
},
"timeout": {
"type" : "time",
"description" : "Explicit operation timeout"

View File

@ -17,7 +17,7 @@
},
"metric" : {
"type" : "list",
"options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "id_cache", "indexing", "merge", "percolate", "query_cache", "refresh", "search", "segments", "store", "warmer", "suggest"],
"options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "indexing", "merge", "percolate", "query_cache", "refresh", "search", "segments", "store", "warmer", "suggest"],
"description" : "Limit the information returned the specific metrics."
}
},

View File

@ -20,7 +20,7 @@
},
"index_metric" : {
"type" : "list",
"options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "id_cache", "indexing", "merge", "percolate", "query_cache", "refresh", "search", "segments", "store", "warmer", "suggest"],
"options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "indexing", "merge", "percolate", "query_cache", "refresh", "search", "segments", "store", "warmer", "suggest"],
"description" : "Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) metric isn't specified."
},
"node_id": {

View File

@ -29,7 +29,6 @@
get.exists_total .+ \n
get.missing_time .+ \n
get.missing_total .+ \n
id_cache.memory_size .+ \n
indexing.delete_current .+ \n
indexing.delete_time .+ \n
indexing.delete_total .+ \n

View File

@ -63,3 +63,11 @@ setup:
body: { "text": "Bar Foo", "filters": ["lowercase"], "tokenizer": keyword }
- length: {tokens: 1 }
- match: { tokens.0.token: bar foo }
---
"Array text":
- do:
indices.analyze:
body: { "text": ["Foo Bar", "Baz"], "filters": ["lowercase"], "tokenizer": keyword }
- length: {tokens: 2 }
- match: { tokens.0.token: foo bar }
- match: { tokens.1.token: baz }

View File

@ -30,7 +30,6 @@ setup:
- is_true: _all.total.flush
- is_true: _all.total.warmer
- is_true: _all.total.filter_cache
- is_true: _all.total.id_cache
- is_true: _all.total.fielddata
- is_true: _all.total.percolate
- is_true: _all.total.completion
@ -54,7 +53,6 @@ setup:
- is_true: _all.total.flush
- is_true: _all.total.warmer
- is_true: _all.total.filter_cache
- is_true: _all.total.id_cache
- is_true: _all.total.fielddata
- is_true: _all.total.percolate
- is_true: _all.total.completion
@ -78,7 +76,6 @@ setup:
- is_false: _all.total.flush
- is_false: _all.total.warmer
- is_false: _all.total.filter_cache
- is_false: _all.total.id_cache
- is_false: _all.total.fielddata
- is_false: _all.total.percolate
- is_false: _all.total.completion
@ -102,7 +99,6 @@ setup:
- is_false: _all.total.flush
- is_false: _all.total.warmer
- is_false: _all.total.filter_cache
- is_false: _all.total.id_cache
- is_false: _all.total.fielddata
- is_false: _all.total.percolate
- is_false: _all.total.completion
@ -127,7 +123,6 @@ setup:
- is_false: _all.total.flush
- is_false: _all.total.warmer
- is_false: _all.total.filter_cache
- is_false: _all.total.id_cache
- is_false: _all.total.fielddata
- is_false: _all.total.percolate
- is_false: _all.total.completion

View File

@ -65,6 +65,8 @@ public class CustomPassageFormatter extends PassageFormatter {
//we remove the paragraph separator if present at the end of the snippet (we used it as separator between values)
if (sb.charAt(sb.length() - 1) == HighlightUtils.PARAGRAPH_SEPARATOR) {
sb.deleteCharAt(sb.length() - 1);
} else if (sb.charAt(sb.length() - 1) == HighlightUtils.NULL_SEPARATOR) {
sb.deleteCharAt(sb.length() - 1);
}
//and we trim the snippets too
snippets[j] = new Snippet(sb.toString().trim(), passage.score, passage.numMatches > 0);

View File

@ -18,121 +18,85 @@
package org.apache.lucene.search.postingshighlight;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Strings;
import org.elasticsearch.search.highlight.HighlightUtils;
import org.apache.lucene.search.Query;
import java.io.IOException;
import java.text.BreakIterator;
import java.util.List;
import java.util.Map;
/**
* Subclass of the {@link XPostingsHighlighter} that works for a single field in a single document.
* It receives the field values as input and it performs discrete highlighting on each single value
* calling the highlightDoc method multiple times.
* It allows to pass in the query terms to avoid calling extract terms multiple times.
*
* The use that we make of the postings highlighter is not optimal. It would be much better to
* highlight multiple docs in a single call, as we actually lose its sequential IO. But that would require:
* 1) to make our fork more complex and harder to maintain to perform discrete highlighting (needed to return
* a different snippet per value when number_of_fragments=0 and the field has multiple values)
* 2) refactoring of the elasticsearch highlight api which currently works per hit
* Subclass of the {@link PostingsHighlighter} that works for a single field in a single document.
* Uses a custom {@link PassageFormatter}. Accepts field content as a constructor argument, given that loading
* is custom and can be done reading from _source field. Supports using different {@link BreakIterator} to break
* the text into fragments. Considers every distinct field value as a discrete passage for highlighting (unless
* the whole content needs to be highlighted). Supports both returning empty snippets and non highlighted snippets
* when no highlighting can be performed.
*
* The use that we make of the postings highlighter is not optimal. It would be much better to highlight
* multiple docs in a single call, as we actually lose its sequential IO. That would require refactoring
* the elasticsearch highlight api, which currently works per hit.
*/
public final class CustomPostingsHighlighter extends XPostingsHighlighter {
public final class CustomPostingsHighlighter extends PostingsHighlighter {
private static final Snippet[] EMPTY_SNIPPET = new Snippet[0];
private static final Passage[] EMPTY_PASSAGE = new Passage[0];
private final Analyzer analyzer;
private final CustomPassageFormatter passageFormatter;
private final int noMatchSize;
private final int totalContentLength;
private final String[] fieldValues;
private final int[] fieldValuesOffsets;
private int currentValueIndex = 0;
private final BreakIterator breakIterator;
private final boolean returnNonHighlightedSnippets;
private final String fieldValue;
private BreakIterator breakIterator;
public CustomPostingsHighlighter(CustomPassageFormatter passageFormatter, List<Object> fieldValues, boolean mergeValues, int maxLength, int noMatchSize) {
super(maxLength);
this.passageFormatter = passageFormatter;
this.noMatchSize = noMatchSize;
if (mergeValues) {
String rawValue = Strings.collectionToDelimitedString(fieldValues, String.valueOf(getMultiValuedSeparator("")));
String fieldValue = rawValue.substring(0, Math.min(rawValue.length(), maxLength));
this.fieldValues = new String[]{fieldValue};
this.fieldValuesOffsets = new int[]{0};
this.totalContentLength = fieldValue.length();
} else {
this.fieldValues = new String[fieldValues.size()];
this.fieldValuesOffsets = new int[fieldValues.size()];
int contentLength = 0;
int offset = 0;
int previousLength = -1;
for (int i = 0; i < fieldValues.size(); i++) {
String rawValue = fieldValues.get(i).toString();
String fieldValue = rawValue.substring(0, Math.min(rawValue.length(), maxLength));
this.fieldValues[i] = fieldValue;
contentLength += fieldValue.length();
offset += previousLength + 1;
this.fieldValuesOffsets[i] = offset;
previousLength = fieldValue.length();
}
this.totalContentLength = contentLength;
}
/**
* Creates a new instance of {@link CustomPostingsHighlighter}
*
* @param analyzer the analyzer used for the field at index time, used for multi term queries internally
* @param passageFormatter our own {@link PassageFormatter} which generates snippets in forms of {@link Snippet} objects
* @param fieldValue the original field values as a constructor argument, loaded from the _source field or the relevant stored field.
* @param returnNonHighlightedSnippets whether non highlighted snippets should be returned rather than empty snippets when
* no highlighting can be performed
*/
public CustomPostingsHighlighter(Analyzer analyzer, CustomPassageFormatter passageFormatter, String fieldValue, boolean returnNonHighlightedSnippets) {
this(analyzer, passageFormatter, null, fieldValue, returnNonHighlightedSnippets);
}
/*
Our own api to highlight a single document field, passing in the query terms, and get back our own Snippet object
/**
* Creates a new instance of {@link CustomPostingsHighlighter}
*
* @param analyzer the analyzer used for the field at index time, used for multi term queries internally
* @param passageFormatter our own {@link PassageFormatter} which generates snippets in forms of {@link Snippet} objects
* @param breakIterator an instance {@link BreakIterator} selected depending on the highlighting options
* @param fieldValue the original field values as a constructor argument, loaded from the _source field or the relevant stored field.
* @param returnNonHighlightedSnippets whether non highlighted snippets should be returned rather than empty snippets when
* no highlighting can be performed
*/
public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexReader reader, int docId, int maxPassages) throws IOException {
IndexReaderContext readerContext = reader.getContext();
List<LeafReaderContext> leaves = readerContext.leaves();
public CustomPostingsHighlighter(Analyzer analyzer, CustomPassageFormatter passageFormatter, BreakIterator breakIterator, String fieldValue, boolean returnNonHighlightedSnippets) {
this.analyzer = analyzer;
this.passageFormatter = passageFormatter;
this.breakIterator = breakIterator;
this.returnNonHighlightedSnippets = returnNonHighlightedSnippets;
this.fieldValue = fieldValue;
}
String[] contents = new String[]{loadCurrentFieldValue()};
Map<Integer, Object> snippetsMap = highlightField(field, contents, getBreakIterator(field), terms, new int[]{docId}, leaves, maxPassages);
//increment the current value index so that next time we'll highlight the next value if available
currentValueIndex++;
Object snippetObject = snippetsMap.get(docId);
if (snippetObject != null && snippetObject instanceof Snippet[]) {
return (Snippet[]) snippetObject;
/**
* Highlights terms extracted from the provided query within the content of the provided field name
*/
public Snippet[] highlightField(String field, Query query, IndexSearcher searcher, int docId, int maxPassages) throws IOException {
Map<String, Object[]> fieldsAsObjects = super.highlightFieldsAsObjects(new String[]{field}, query, searcher, new int[]{docId}, new int[]{maxPassages});
Object[] snippetObjects = fieldsAsObjects.get(field);
if (snippetObjects != null) {
//one single document at a time
assert snippetObjects.length == 1;
Object snippetObject = snippetObjects[0];
if (snippetObject != null && snippetObject instanceof Snippet[]) {
return (Snippet[]) snippetObject;
}
}
return EMPTY_SNIPPET;
}
/*
Method provided through our own fork: allows to do proper scoring when doing per value discrete highlighting.
Used to provide the total length of the field (all values) for proper scoring.
*/
@Override
protected int getContentLength(String field, int docId) {
return totalContentLength;
}
/*
Method provided through our own fork: allows to perform proper per value discrete highlighting.
Used to provide the offset for the current value.
*/
@Override
protected int getOffsetForCurrentValue(String field, int docId) {
if (currentValueIndex < fieldValuesOffsets.length) {
return fieldValuesOffsets[currentValueIndex];
}
throw new IllegalArgumentException("No more values offsets to return");
}
public void setBreakIterator(BreakIterator breakIterator) {
this.breakIterator = breakIterator;
}
@Override
protected PassageFormatter getFormatter(String field) {
return passageFormatter;
@ -146,41 +110,27 @@ public final class CustomPostingsHighlighter extends XPostingsHighlighter {
return breakIterator;
}
@Override
protected char getMultiValuedSeparator(String field) {
//U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting
return HighlightUtils.PARAGRAPH_SEPARATOR;
}
/*
By default the postings highlighter returns non highlighted snippet when there are no matches.
We want to return no snippets by default, unless no_match_size is greater than 0
*/
@Override
protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
if (noMatchSize > 0) {
if (returnNonHighlightedSnippets) {
//we want to return the first sentence of the first snippet only
return super.getEmptyHighlight(fieldName, bi, 1);
}
return EMPTY_PASSAGE;
}
/*
Not needed since we call our own loadCurrentFieldValue explicitly, but we override it anyway for consistency.
*/
@Override
protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
return new String[][]{new String[]{loadCurrentFieldValue()}};
protected Analyzer getIndexAnalyzer(String field) {
return analyzer;
}
/*
Our own method that returns the field values, which relies on the content that was provided when creating the highlighter.
Supports per value discrete highlighting calling the highlightDoc method multiple times, one per value.
*/
protected String loadCurrentFieldValue() {
if (currentValueIndex < fieldValues.length) {
return fieldValues[currentValueIndex];
}
throw new IllegalArgumentException("No more values to return");
@Override
protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
//we only highlight one field, one document at a time
return new String[][]{new String[]{fieldValue}};
}
}
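For context, here is a minimal usage sketch of the reworked highlighter, written as hypothetical caller code (not part of this commit) that relies only on the signatures visible in the diff above; constructing the CustomPassageFormatter is out of scope here, so an instance is taken as a parameter.
[source,java]
--------------------------------------------------
package org.apache.lucene.search.postingshighlight; // same package assumed, hypothetical caller code

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

import java.io.IOException;

class CustomPostingsHighlighterUsageSketch {

    Snippet[] highlight(Analyzer analyzer, CustomPassageFormatter formatter,
                        IndexSearcher searcher, Query query,
                        int docId, String field, String fieldValue) throws IOException {
        // false: return empty snippets rather than non-highlighted text when nothing matches
        CustomPostingsHighlighter highlighter =
                new CustomPostingsHighlighter(analyzer, formatter, fieldValue, false);
        // a different BreakIterator (e.g. the CustomSeparatorBreakIterator below) can be
        // plugged in via setBreakIterator(...) before highlighting
        return highlighter.highlightField(field, query, searcher, docId, 1);
    }
}
--------------------------------------------------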

View File

@ -0,0 +1,153 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.lucene.search.postingshighlight;
import java.text.BreakIterator;
import java.text.CharacterIterator;
/**
* A {@link BreakIterator} that breaks the text whenever a certain separator, provided as a constructor argument, is found.
*/
public class CustomSeparatorBreakIterator extends BreakIterator {
private final char separator;
private CharacterIterator text;
private int current;
public CustomSeparatorBreakIterator(char separator) {
this.separator = separator;
}
@Override
public int current() {
return current;
}
@Override
public int first() {
text.setIndex(text.getBeginIndex());
return current = text.getIndex();
}
@Override
public int last() {
text.setIndex(text.getEndIndex());
return current = text.getIndex();
}
@Override
public int next() {
if (text.getIndex() == text.getEndIndex()) {
return DONE;
} else {
return advanceForward();
}
}
private int advanceForward() {
char c;
while( (c = text.next()) != CharacterIterator.DONE) {
if (c == separator) {
return current = text.getIndex() + 1;
}
}
assert text.getIndex() == text.getEndIndex();
return current = text.getIndex();
}
@Override
public int following(int pos) {
if (pos < text.getBeginIndex() || pos > text.getEndIndex()) {
throw new IllegalArgumentException("offset out of bounds");
} else if (pos == text.getEndIndex()) {
// this conflicts with the javadocs, but matches actual behavior (Oracle has a bug in something)
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=9000909
text.setIndex(text.getEndIndex());
current = text.getIndex();
return DONE;
} else {
text.setIndex(pos);
current = text.getIndex();
return advanceForward();
}
}
@Override
public int previous() {
if (text.getIndex() == text.getBeginIndex()) {
return DONE;
} else {
return advanceBackward();
}
}
private int advanceBackward() {
char c;
while( (c = text.previous()) != CharacterIterator.DONE) {
if (c == separator) {
return current = text.getIndex() + 1;
}
}
assert text.getIndex() == text.getBeginIndex();
return current = text.getIndex();
}
@Override
public int preceding(int pos) {
if (pos < text.getBeginIndex() || pos > text.getEndIndex()) {
throw new IllegalArgumentException("offset out of bounds");
} else if (pos == text.getBeginIndex()) {
// this conflicts with the javadocs, but matches actual behavior (Oracle has a bug in something)
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=9000909
text.setIndex(text.getBeginIndex());
current = text.getIndex();
return DONE;
} else {
text.setIndex(pos);
current = text.getIndex();
return advanceBackward();
}
}
@Override
public int next(int n) {
if (n < 0) {
for (int i = 0; i < -n; i++) {
previous();
}
} else {
for (int i = 0; i < n; i++) {
next();
}
}
return current();
}
@Override
public CharacterIterator getText() {
return text;
}
@Override
public void setText(CharacterIterator newText) {
text = newText;
current = text.getBeginIndex();
}
}
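A quick standalone check (hypothetical demo code, not part of this commit) of how the new break iterator splits text on the chosen separator character; every resulting piece except the last keeps its trailing separator, matching the boundary positions computed above.
[source,java]
--------------------------------------------------
package org.apache.lucene.search.postingshighlight; // same package assumed, hypothetical demo code

import java.text.BreakIterator;
import java.text.StringCharacterIterator;

// Walks the boundaries produced by CustomSeparatorBreakIterator and prints each piece.
// A plain '|' is used here for readability; in Elasticsearch the separator would be
// U+2029 PARAGRAPH SEPARATOR, as seen in the highlighter code above.
public class CustomSeparatorBreakIteratorDemo {

    public static void main(String[] args) {
        char separator = '|';
        String text = "first value" + separator + "second value" + separator + "third value";

        BreakIterator bi = new CustomSeparatorBreakIterator(separator);
        bi.setText(new StringCharacterIterator(text));

        int start = bi.first();
        for (int end = bi.next(); end != BreakIterator.DONE; start = end, end = bi.next()) {
            System.out.println("[" + text.substring(start, end) + "]");
        }
    }
}
--------------------------------------------------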

View File

@ -22,7 +22,7 @@ package org.apache.lucene.search.postingshighlight;
* Represents a scored highlighted snippet.
* It's our own arbitrary object that we get back from the postings highlighter when highlighting a document.
* Every snippet contains its formatted text and its score.
* The score is needed since we highlight every single value separately and we might want to return snippets sorted by score.
* The score is needed in case we want to sort snippets by score; they are sorted by position in the text by default.
*/
public class Snippet {

View File

@ -1,772 +0,0 @@
/*
* Licensed to Elasticsearch under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elasticsearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.lucene.search.postingshighlight;
import org.apache.lucene.index.*;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.UnicodeUtil;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.text.BreakIterator;
import java.util.*;
/*
FORKED from Lucene 4.5 to be able to:
1) support discrete highlighting for multiple values, so that we can return a different snippet per value when highlighting the whole text
2) call the highlightField method directly from subclasses and provide the terms by ourselves
3) Applied LUCENE-4906 to allow PassageFormatter to return arbitrary objects (LUCENE 4.6)
All our changes start with //BEGIN EDIT
*/
public class XPostingsHighlighter {
//BEGIN EDIT added method to override offset for current value (default 0)
//we need this to perform discrete highlighting per field
protected int getOffsetForCurrentValue(String field, int docId) {
return 0;
}
//END EDIT
//BEGIN EDIT
//we need this to fix scoring when highlighting every single value separately, since the score depends on the total length of the field (all values rather than only the current one)
protected int getContentLength(String field, int docId) {
return -1;
}
//END EDIT
// TODO: maybe allow re-analysis for tiny fields? currently we require offsets,
// but if the analyzer is really fast and the field is tiny, this might really be
// unnecessary.
/** for rewriting: we don't want slow processing from MTQs */
private static final IndexSearcher EMPTY_INDEXSEARCHER;
static {
try {
IndexReader emptyReader = new MultiReader();
EMPTY_INDEXSEARCHER = new IndexSearcher(emptyReader);
EMPTY_INDEXSEARCHER.setQueryCache(null);
} catch (IOException bogus) {
throw new RuntimeException(bogus);
}
}
/** Default maximum content size to process. Typically snippets
* closer to the beginning of the document better summarize its content */
public static final int DEFAULT_MAX_LENGTH = 10000;
private final int maxLength;
/** Set the first time {@link #getFormatter} is called,
* and then reused. */
private PassageFormatter defaultFormatter;
/** Set the first time {@link #getScorer} is called,
* and then reused. */
private PassageScorer defaultScorer;
/**
* Creates a new highlighter with default parameters.
*/
public XPostingsHighlighter() {
this(DEFAULT_MAX_LENGTH);
}
/**
* Creates a new highlighter, specifying maximum content length.
* @param maxLength maximum content size to process.
* @throws IllegalArgumentException if <code>maxLength</code> is negative or <code>Integer.MAX_VALUE</code>
*/
public XPostingsHighlighter(int maxLength) {
if (maxLength < 0 || maxLength == Integer.MAX_VALUE) {
// two reasons: no overflow problems in BreakIterator.preceding(offset+1),
// our sentinel in the offsets queue uses this value to terminate.
throw new IllegalArgumentException("maxLength must be < Integer.MAX_VALUE");
}
this.maxLength = maxLength;
}
/** Returns the {@link java.text.BreakIterator} to use for
* dividing text into passages. This returns
* {@link java.text.BreakIterator#getSentenceInstance(java.util.Locale)} by default;
* subclasses can override to customize. */
protected BreakIterator getBreakIterator(String field) {
return BreakIterator.getSentenceInstance(Locale.ROOT);
}
/** Returns the {@link PassageFormatter} to use for
* formatting passages into highlighted snippets. This
* returns a new {@code PassageFormatter} by default;
* subclasses can override to customize. */
protected PassageFormatter getFormatter(String field) {
if (defaultFormatter == null) {
defaultFormatter = new DefaultPassageFormatter();
}
return defaultFormatter;
}
/** Returns the {@link PassageScorer} to use for
* ranking passages. This
* returns a new {@code PassageScorer} by default;
* subclasses can override to customize. */
protected PassageScorer getScorer(String field) {
if (defaultScorer == null) {
defaultScorer = new PassageScorer();
}
return defaultScorer;
}
/**
* Highlights the top passages from a single field.
*
* @param field field name to highlight.
* Must have a stored string value and also be indexed with offsets.
* @param query query to highlight.
* @param searcher searcher that was previously used to execute the query.
* @param topDocs TopDocs containing the summary result documents to highlight.
* @return Array of formatted snippets corresponding to the documents in <code>topDocs</code>.
* If no highlights were found for a document, the
* first sentence for the field will be returned.
* @throws java.io.IOException if an I/O error occurred during processing
* @throws IllegalArgumentException if <code>field</code> was indexed without
* {@link org.apache.lucene.index.FieldInfo.IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
*/
public String[] highlight(String field, Query query, IndexSearcher searcher, TopDocs topDocs) throws IOException {
return highlight(field, query, searcher, topDocs, 1);
}
/**
* Highlights the top-N passages from a single field.
*
* @param field field name to highlight.
* Must have a stored string value and also be indexed with offsets.
* @param query query to highlight.
* @param searcher searcher that was previously used to execute the query.
* @param topDocs TopDocs containing the summary result documents to highlight.
* @param maxPassages The maximum number of top-N ranked passages used to
* form the highlighted snippets.
* @return Array of formatted snippets corresponding to the documents in <code>topDocs</code>.
* If no highlights were found for a document, the
* first {@code maxPassages} sentences from the
* field will be returned.
* @throws IOException if an I/O error occurred during processing
* @throws IllegalArgumentException if <code>field</code> was indexed without
* {@link org.apache.lucene.index.FieldInfo.IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
*/
public String[] highlight(String field, Query query, IndexSearcher searcher, TopDocs topDocs, int maxPassages) throws IOException {
Map<String,String[]> res = highlightFields(new String[] { field }, query, searcher, topDocs, new int[] { maxPassages });
return res.get(field);
}
/**
* Highlights the top passages from multiple fields.
* <p>
* Conceptually, this behaves as a more efficient form of:
* <pre class="prettyprint">
* Map m = new HashMap();
* for (String field : fields) {
* m.put(field, highlight(field, query, searcher, topDocs));
* }
* return m;
* </pre>
*
* @param fields field names to highlight.
* Must have a stored string value and also be indexed with offsets.
* @param query query to highlight.
* @param searcher searcher that was previously used to execute the query.
* @param topDocs TopDocs containing the summary result documents to highlight.
* @return Map keyed on field name, containing the array of formatted snippets
* corresponding to the documents in <code>topDocs</code>.
* If no highlights were found for a document, the
* first sentence from the field will be returned.
* @throws IOException if an I/O error occurred during processing
* @throws IllegalArgumentException if <code>field</code> was indexed without
* {@link org.apache.lucene.index.FieldInfo.IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
*/
public Map<String,String[]> highlightFields(String fields[], Query query, IndexSearcher searcher, TopDocs topDocs) throws IOException {
int maxPassages[] = new int[fields.length];
Arrays.fill(maxPassages, 1);
return highlightFields(fields, query, searcher, topDocs, maxPassages);
}
/**
* Highlights the top-N passages from multiple fields.
* <p>
* Conceptually, this behaves as a more efficient form of:
* <pre class="prettyprint">
* Map m = new HashMap();
* for (String field : fields) {
* m.put(field, highlight(field, query, searcher, topDocs, maxPassages));
* }
* return m;
* </pre>
*
* @param fields field names to highlight.
* Must have a stored string value and also be indexed with offsets.
* @param query query to highlight.
* @param searcher searcher that was previously used to execute the query.
* @param topDocs TopDocs containing the summary result documents to highlight.
* @param maxPassages The maximum number of top-N ranked passages per-field used to
* form the highlighted snippets.
* @return Map keyed on field name, containing the array of formatted snippets
* corresponding to the documents in <code>topDocs</code>.
* If no highlights were found for a document, the
* first {@code maxPassages} sentences from the
* field will be returned.
* @throws IOException if an I/O error occurred during processing
* @throws IllegalArgumentException if <code>field</code> was indexed without
* {@link org.apache.lucene.index.FieldInfo.IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
*/
public Map<String,String[]> highlightFields(String fields[], Query query, IndexSearcher searcher, TopDocs topDocs, int maxPassages[]) throws IOException {
final ScoreDoc scoreDocs[] = topDocs.scoreDocs;
int docids[] = new int[scoreDocs.length];
for (int i = 0; i < docids.length; i++) {
docids[i] = scoreDocs[i].doc;
}
return highlightFields(fields, query, searcher, docids, maxPassages);
}
/**
* Highlights the top-N passages from multiple fields,
* for the provided int[] docids.
*
* @param fieldsIn field names to highlight.
* Must have a stored string value and also be indexed with offsets.
* @param query query to highlight.
* @param searcher searcher that was previously used to execute the query.
* @param docidsIn containing the document IDs to highlight.
* @param maxPassagesIn The maximum number of top-N ranked passages per-field used to
* form the highlighted snippets.
* @return Map keyed on field name, containing the array of formatted snippets
* corresponding to the documents in <code>topDocs</code>.
* If no highlights were found for a document, the
* first {@code maxPassages} from the field will
* be returned.
* @throws IOException if an I/O error occurred during processing
* @throws IllegalArgumentException if <code>field</code> was indexed without
* {@link org.apache.lucene.index.FieldInfo.IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
*/
public Map<String,String[]> highlightFields(String fieldsIn[], Query query, IndexSearcher searcher, int[] docidsIn, int maxPassagesIn[]) throws IOException {
Map<String,String[]> snippets = new HashMap<>();
for(Map.Entry<String,Object[]> ent : highlightFieldsAsObjects(fieldsIn, query, searcher, docidsIn, maxPassagesIn).entrySet()) {
Object[] snippetObjects = ent.getValue();
String[] snippetStrings = new String[snippetObjects.length];
snippets.put(ent.getKey(), snippetStrings);
for(int i=0;i<snippetObjects.length;i++) {
Object snippet = snippetObjects[i];
if (snippet != null) {
snippetStrings[i] = snippet.toString();
}
}
}
return snippets;
}
public Map<String,Object[]> highlightFieldsAsObjects(String fieldsIn[], Query query, IndexSearcher searcher, int[] docidsIn, int maxPassagesIn[]) throws IOException {
if (fieldsIn.length < 1) {
throw new IllegalArgumentException("fieldsIn must not be empty");
}
if (fieldsIn.length != maxPassagesIn.length) {
throw new IllegalArgumentException("invalid number of maxPassagesIn");
}
SortedSet<Term> queryTerms = new TreeSet<>();
EMPTY_INDEXSEARCHER.createNormalizedWeight(query, false).extractTerms(queryTerms);
IndexReaderContext readerContext = searcher.getIndexReader().getContext();
List<LeafReaderContext> leaves = readerContext.leaves();
// Make our own copies because we sort in-place:
int[] docids = new int[docidsIn.length];
System.arraycopy(docidsIn, 0, docids, 0, docidsIn.length);
final String fields[] = new String[fieldsIn.length];
System.arraycopy(fieldsIn, 0, fields, 0, fieldsIn.length);
final int maxPassages[] = new int[maxPassagesIn.length];
System.arraycopy(maxPassagesIn, 0, maxPassages, 0, maxPassagesIn.length);
// sort for sequential io
Arrays.sort(docids);
new InPlaceMergeSorter() {
@Override
protected void swap(int i, int j) {
String tmp = fields[i];
fields[i] = fields[j];
fields[j] = tmp;
int tmp2 = maxPassages[i];
maxPassages[i] = maxPassages[j];
maxPassages[j] = tmp2;
}
@Override
protected int compare(int i, int j) {
return fields[i].compareTo(fields[j]);
}
}.sort(0, fields.length);
// pull stored data:
String[][] contents = loadFieldValues(searcher, fields, docids, maxLength);
Map<String,Object[]> highlights = new HashMap<>();
for (int i = 0; i < fields.length; i++) {
String field = fields[i];
int numPassages = maxPassages[i];
Term floor = new Term(field, "");
Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
SortedSet<Term> fieldTerms = queryTerms.subSet(floor, ceiling);
// TODO: should we have some reasonable defaults for term pruning? (e.g. stopwords)
// Strip off the redundant field:
BytesRef terms[] = new BytesRef[fieldTerms.size()];
int termUpto = 0;
for(Term term : fieldTerms) {
terms[termUpto++] = term.bytes();
}
Map<Integer,Object> fieldHighlights = highlightField(field, contents[i], getBreakIterator(field), terms, docids, leaves, numPassages);
Object[] result = new Object[docids.length];
for (int j = 0; j < docidsIn.length; j++) {
result[j] = fieldHighlights.get(docidsIn[j]);
}
highlights.put(field, result);
}
return highlights;
}
/** Loads the String values for each field X docID to be
* highlighted. By default this loads from stored
* fields, but a subclass can change the source. This
* method should allocate the String[fields.length][docids.length]
* and fill all values. The returned Strings must be
* identical to what was indexed. */
protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
String contents[][] = new String[fields.length][docids.length];
char valueSeparators[] = new char[fields.length];
for (int i = 0; i < fields.length; i++) {
valueSeparators[i] = getMultiValuedSeparator(fields[i]);
}
LimitedStoredFieldVisitor visitor = new LimitedStoredFieldVisitor(fields, valueSeparators, maxLength);
for (int i = 0; i < docids.length; i++) {
searcher.doc(docids[i], visitor);
for (int j = 0; j < fields.length; j++) {
contents[j][i] = visitor.getValue(j);
}
visitor.reset();
}
return contents;
}
/**
* Returns the logical separator between values for multi-valued fields.
* The default value is a space character, which means passages can span across values,
* but a subclass can override, for example with {@code U+2029 PARAGRAPH SEPARATOR (PS)}
* if each value holds a discrete passage for highlighting.
*/
protected char getMultiValuedSeparator(String field) {
return ' ';
}
//BEGIN EDIT: made protected so that we can call from our subclass and pass in the terms by ourselves
protected Map<Integer,Object> highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List<LeafReaderContext> leaves, int maxPassages) throws IOException {
//private Map<Integer,Object> highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List<LeafReaderContext > leaves, int maxPassages) throws IOException {
//END EDIT
Map<Integer,Object> highlights = new HashMap<>();
// reuse in the real sense... for docs in same segment we just advance our old enum
PostingsEnum postings[] = null;
TermsEnum termsEnum = null;
int lastLeaf = -1;
PassageFormatter fieldFormatter = getFormatter(field);
if (fieldFormatter == null) {
throw new NullPointerException("PassageFormatter cannot be null");
}
for (int i = 0; i < docids.length; i++) {
String content = contents[i];
if (content.length() == 0) {
continue; // nothing to do
}
bi.setText(content);
int doc = docids[i];
int leaf = ReaderUtil.subIndex(doc, leaves);
LeafReaderContext subContext = leaves.get(leaf);
LeafReader r = subContext.reader();
Terms t = r.terms(field);
if (t == null) {
continue; // nothing to do
}
if (!t.hasOffsets()) {
// no offsets available
throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
}
if (leaf != lastLeaf) {
termsEnum = t.iterator();
postings = new PostingsEnum[terms.length];
}
Passage passages[] = highlightDoc(field, terms, content.length(), bi, doc - subContext.docBase, termsEnum, postings, maxPassages);
if (passages.length == 0) {
passages = getEmptyHighlight(field, bi, maxPassages);
}
if (passages.length > 0) {
// otherwise a null snippet (eg if field is missing
// entirely from the doc)
highlights.put(doc, fieldFormatter.format(passages, content));
}
lastLeaf = leaf;
}
return highlights;
}
// algorithm: treat sentence snippets as miniature documents
// we can intersect these with the postings lists via BreakIterator.preceding(offset)
// score each sentence as norm(sentenceStartOffset) * sum(weight * tf(freq))
private Passage[] highlightDoc(String field, BytesRef terms[], int contentLength, BreakIterator bi, int doc,
TermsEnum termsEnum, PostingsEnum[] postings, int n) throws IOException {
//BEGIN EDIT added call to method that returns the offset for the current value (discrete highlighting)
int valueOffset = getOffsetForCurrentValue(field, doc);
//END EDIT
PassageScorer scorer = getScorer(field);
if (scorer == null) {
throw new NullPointerException("PassageScorer cannot be null");
}
//BEGIN EDIT discrete highlighting
// the scoring needs to be based on the length of the whole field (all values rather than only the current one)
int totalContentLength = getContentLength(field, doc);
if (totalContentLength == -1) {
totalContentLength = contentLength;
}
//END EDIT
PriorityQueue<OffsetsEnum> pq = new PriorityQueue<>();
float weights[] = new float[terms.length];
// initialize postings
for (int i = 0; i < terms.length; i++) {
PostingsEnum de = postings[i];
int pDoc;
if (de == EMPTY) {
continue;
} else if (de == null) {
postings[i] = EMPTY; // initially
if (!termsEnum.seekExact(terms[i])) {
continue; // term not found
}
de = postings[i] = termsEnum.postings(null, null, PostingsEnum.OFFSETS);
assert de != null;
pDoc = de.advance(doc);
} else {
pDoc = de.docID();
if (pDoc < doc) {
pDoc = de.advance(doc);
}
}
if (doc == pDoc) {
//BEGIN EDIT we take into account the length of the whole field (all values) to properly score the snippets
weights[i] = scorer.weight(totalContentLength, de.freq());
//weights[i] = scorer.weight(contentLength, de.freq());
//END EDIT
de.nextPosition();
pq.add(new OffsetsEnum(de, i));
}
}
pq.add(new OffsetsEnum(EMPTY, Integer.MAX_VALUE)); // a sentinel for termination
PriorityQueue<Passage> passageQueue = new PriorityQueue<>(n, new Comparator<Passage>() {
@Override
public int compare(Passage left, Passage right) {
if (left.score < right.score) {
return -1;
} else if (left.score > right.score) {
return 1;
} else {
return left.startOffset - right.startOffset;
}
}
});
Passage current = new Passage();
OffsetsEnum off;
while ((off = pq.poll()) != null) {
final PostingsEnum dp = off.dp;
int start = dp.startOffset();
assert start >= 0;
int end = dp.endOffset();
// LUCENE-5166: this hit would span the content limit... however more valid
// hits may exist (they are sorted by start). so we pretend like we never
// saw this term, it won't cause a passage to be added to passageQueue or anything.
assert EMPTY.startOffset() == Integer.MAX_VALUE;
if (start < contentLength && end > contentLength) {
continue;
}
//BEGIN EDIT support for discrete highlighting (added block code)
//switch to the first match in the current value if there is one
boolean seenEnough = false;
while (start < valueOffset) {
if (off.pos == dp.freq()) {
seenEnough = true;
break;
} else {
off.pos++;
dp.nextPosition();
start = dp.startOffset();
end = dp.endOffset();
}
}
//continue with next term if we've already seen the current one all the times it appears
//that means that the current value doesn't hold matches for the current term
if (seenEnough) {
continue;
}
//we now subtract the offset of the current value to both start and end
start -= valueOffset;
end -= valueOffset;
//END EDIT
if (start >= current.endOffset) {
if (current.startOffset >= 0) {
// finalize current
//BEGIN EDIT we take into account the value offset when scoring the snippet based on its position
current.score *= scorer.norm(current.startOffset + valueOffset);
//current.score *= scorer.norm(current.startOffset);
//END EDIT
// new sentence: first add 'current' to queue
if (passageQueue.size() == n && current.score < passageQueue.peek().score) {
current.reset(); // can't compete, just reset it
} else {
passageQueue.offer(current);
if (passageQueue.size() > n) {
current = passageQueue.poll();
current.reset();
} else {
current = new Passage();
}
}
}
// if we exceed limit, we are done
if (start >= contentLength) {
Passage passages[] = new Passage[passageQueue.size()];
passageQueue.toArray(passages);
for (Passage p : passages) {
p.sort();
}
// sort in ascending order
Arrays.sort(passages, new Comparator<Passage>() {
@Override
public int compare(Passage left, Passage right) {
return left.startOffset - right.startOffset;
}
});
return passages;
}
// advance breakiterator
assert BreakIterator.DONE < 0;
current.startOffset = Math.max(bi.preceding(start+1), 0);
current.endOffset = Math.min(bi.next(), contentLength);
}
int tf = 0;
while (true) {
tf++;
current.addMatch(start, end, terms[off.id]);
if (off.pos == dp.freq()) {
break; // removed from pq
} else {
off.pos++;
dp.nextPosition();
//BEGIN EDIT support for discrete highlighting
start = dp.startOffset() - valueOffset;
end = dp.endOffset() - valueOffset;
//start = dp.startOffset();
//end = dp.endOffset();
//END EDIT
}
if (start >= current.endOffset || end > contentLength) {
pq.offer(off);
break;
}
}
current.score += weights[off.id] * scorer.tf(tf, current.endOffset - current.startOffset);
}
// Dead code but compiler disagrees:
assert false;
return null;
}
/** Called to summarize a document when no hits were
* found. By default this just returns the first
* {@code maxPassages} sentences; subclasses can override
* to customize. */
protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
// BreakIterator should be un-next'd:
List<Passage> passages = new ArrayList<>();
int pos = bi.current();
assert pos == 0;
while (passages.size() < maxPassages) {
int next = bi.next();
if (next == BreakIterator.DONE) {
break;
}
Passage passage = new Passage();
passage.score = Float.NaN;
passage.startOffset = pos;
passage.endOffset = next;
passages.add(passage);
pos = next;
}
return passages.toArray(new Passage[passages.size()]);
}
private static class OffsetsEnum implements Comparable<OffsetsEnum> {
PostingsEnum dp;
int pos;
int id;
OffsetsEnum(PostingsEnum dp, int id) throws IOException {
this.dp = dp;
this.id = id;
this.pos = 1;
}
@Override
public int compareTo(OffsetsEnum other) {
try {
int off = dp.startOffset();
int otherOff = other.dp.startOffset();
if (off == otherOff) {
return id - other.id;
} else {
return Long.signum(((long)off) - otherOff);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
private static final PostingsEnum EMPTY = new PostingsEnum() {
@Override
public int nextPosition() throws IOException { return 0; }
@Override
public int startOffset() throws IOException { return Integer.MAX_VALUE; }
@Override
public int endOffset() throws IOException { return Integer.MAX_VALUE; }
@Override
public BytesRef getPayload() throws IOException { return null; }
@Override
public int freq() throws IOException { return 0; }
@Override
public int docID() { return NO_MORE_DOCS; }
@Override
public int nextDoc() throws IOException { return NO_MORE_DOCS; }
@Override
public int advance(int target) throws IOException { return NO_MORE_DOCS; }
@Override
public long cost() { return 0; }
};
private static class LimitedStoredFieldVisitor extends StoredFieldVisitor {
private final String fields[];
private final char valueSeparators[];
private final int maxLength;
private final StringBuilder builders[];
private int currentField = -1;
public LimitedStoredFieldVisitor(String fields[], char valueSeparators[], int maxLength) {
assert fields.length == valueSeparators.length;
this.fields = fields;
this.valueSeparators = valueSeparators;
this.maxLength = maxLength;
builders = new StringBuilder[fields.length];
for (int i = 0; i < builders.length; i++) {
builders[i] = new StringBuilder();
}
}
@Override
public void stringField(FieldInfo fieldInfo, byte[] bytes) throws IOException {
String value = new String(bytes, StandardCharsets.UTF_8);
assert currentField >= 0;
StringBuilder builder = builders[currentField];
if (builder.length() > 0 && builder.length() < maxLength) {
builder.append(valueSeparators[currentField]);
}
if (builder.length() + value.length() > maxLength) {
builder.append(value, 0, maxLength - builder.length());
} else {
builder.append(value);
}
}
@Override
public Status needsField(FieldInfo fieldInfo) throws IOException {
currentField = Arrays.binarySearch(fields, fieldInfo.name);
if (currentField < 0) {
return Status.NO;
} else if (builders[currentField].length() > maxLength) {
return fields.length == 1 ? Status.STOP : Status.NO;
}
return Status.YES;
}
String getValue(int i) {
return builders[i].toString();
}
void reset() {
currentField = -1;
for (int i = 0; i < fields.length; i++) {
builders[i].setLength(0);
}
}
}
}
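
The highlightFields entry points above keep the signatures of Lucene's stock PostingsHighlighter (the BEGIN EDIT blocks only adjust per-value offsets and scoring internally), so their use can be sketched against the unmodified org.apache.lucene.search.postingshighlight.PostingsHighlighter rather than this fork. A minimal end-to-end sketch, assuming Lucene 5.x; the "body" field and sample text are illustrative:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.postingshighlight.PostingsHighlighter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

import java.util.Map;

public class HighlightSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        // the field must be stored and indexed with offsets, as the javadoc above requires
        FieldType ft = new FieldType(TextField.TYPE_STORED);
        ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
        Document doc = new Document();
        doc.add(new Field("body", "The quick brown fox jumps over the lazy dog.", ft));
        writer.addDocument(doc);
        writer.close();

        IndexReader reader = DirectoryReader.open(dir);
        IndexSearcher searcher = new IndexSearcher(reader);
        TermQuery query = new TermQuery(new Term("body", "fox"));
        TopDocs topDocs = searcher.search(query, 10);

        PostingsHighlighter highlighter = new PostingsHighlighter();
        Map<String, String[]> snippets =
                highlighter.highlightFields(new String[]{"body"}, query, searcher, topDocs, new int[]{1});
        System.out.println(snippets.get("body")[0]); // e.g. "The quick brown <b>fox</b> jumps over the lazy dog."
        reader.close();
        dir.close();
    }
}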

View File

@ -22,16 +22,18 @@ package org.elasticsearch;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexException;
import org.elasticsearch.rest.HasRestHeaders;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.*;
/**
* A base class for all elasticsearch exceptions.
@ -288,6 +290,4 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
public String toString() {
return ExceptionsHelper.detailedMessage(this).trim();
}
}

View File

@ -22,15 +22,21 @@ package org.elasticsearch;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexException;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
*
@ -214,4 +220,59 @@ public final class ExceptionsHelper {
}
return true;
}
/**
* Deduplicate the failures by exception message and index.
*/
public static ShardOperationFailedException[] groupBy(ShardOperationFailedException[] failures) {
List<ShardOperationFailedException> uniqueFailures = new ArrayList<>();
Set<GroupBy> reasons = new HashSet<>();
for (ShardOperationFailedException failure : failures) {
GroupBy reason = new GroupBy(failure.getCause());
if (reasons.contains(reason) == false) {
reasons.add(reason);
uniqueFailures.add(failure);
}
}
return uniqueFailures.toArray(new ShardOperationFailedException[0]);
}
static class GroupBy {
final String reason;
final Index index;
final Class<? extends Throwable> causeType;
public GroupBy(Throwable t) {
if (t instanceof IndexException) {
index = ((IndexException) t).index();
} else {
index = null;
}
reason = t.getMessage();
causeType = t.getClass();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
GroupBy groupBy = (GroupBy) o;
if (!causeType.equals(groupBy.causeType)) return false;
if (index != null ? !index.equals(groupBy.index) : groupBy.index != null) return false;
if (reason != null ? !reason.equals(groupBy.reason) : groupBy.reason != null) return false;
return true;
}
@Override
public int hashCode() {
int result = reason != null ? reason.hashCode() : 0;
result = 31 * result + (index != null ? index.hashCode() : 0);
result = 31 * result + causeType.hashCode();
return result;
}
}
}
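
A rough sketch of the deduplication the new ExceptionsHelper.groupBy performs. It assumes the ShardSearchFailure(Throwable) constructor (ShardSearchFailure implements ShardOperationFailedException, as shown further down in this commit); the failure messages are made up:

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.search.ShardSearchFailure;

public class GroupBySketch {
    public static void main(String[] args) {
        ShardOperationFailedException[] failures = new ShardOperationFailedException[] {
                new ShardSearchFailure(new IllegalArgumentException("bad field")), // assumed constructor
                new ShardSearchFailure(new IllegalArgumentException("bad field")), // same cause type + message
                new ShardSearchFailure(new NullPointerException("missing context"))
        };
        ShardOperationFailedException[] unique = ExceptionsHelper.groupBy(failures);
        // 2: the duplicate IllegalArgumentException failures share cause type, message and (null) index,
        // so GroupBy treats them as equal and only the first is kept
        System.out.println(unique.length);
    }
}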

View File

@ -231,6 +231,11 @@ public abstract class ActionWriteResponse extends ActionResponse {
return status;
}
@Override
public Throwable getCause() {
return cause;
}
/**
* @return Whether this failure occurred on a primary shard.
* (this only reports true for delete by query)

View File

@ -20,16 +20,23 @@
package org.elasticsearch.action;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexException;
import org.elasticsearch.rest.RestStatus;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* An exception indicating that a failure occurred performing an operation on the shard.
*
*
*/
public interface ShardOperationFailedException extends Streamable, Serializable {
public interface ShardOperationFailedException extends Streamable, Serializable, ToXContent {
/**
* The index the operation failed on. Might return <tt>null</tt> if it can't be derived.
@ -50,4 +57,9 @@ public interface ShardOperationFailedException extends Streamable, Serializable
* The status of the failure.
*/
RestStatus status();
/**
* The cause of this failure
*/
Throwable getCause();
}
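
Because getCause() now lives on the interface itself, callers can inspect the underlying Throwable without casting to a concrete failure class. A small sketch; the helper name is illustrative:

import org.elasticsearch.action.ShardOperationFailedException;

public class FailureCauseSketch {
    // walks any failure array (e.g. taken from a broadcast response) and prints the cause class per shard
    static void printCauses(ShardOperationFailedException[] failures) {
        for (ShardOperationFailedException failure : failures) {
            Throwable cause = failure.getCause();
            System.out.println(failure.index() + "[" + failure.shardId() + "]: "
                    + (cause == null ? "no cause" : cause.getClass().getSimpleName()));
        }
    }
}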

View File

@ -66,7 +66,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeOperationA
@Override
protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener<CreateSnapshotResponse> listener) {
SnapshotsService.SnapshotRequest snapshotRequest =
new SnapshotsService.SnapshotRequest("create_snapshot[" + request.snapshot() + "]", request.snapshot(), request.repository())
new SnapshotsService.SnapshotRequest("create_snapshot [" + request.snapshot() + "]", request.snapshot(), request.repository())
.indices(request.indices())
.indicesOptions(request.indicesOptions())
.partial(request.partial())

View File

@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.percolator.stats.PercolateStats;
@ -48,7 +47,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
private StoreStats store;
private FieldDataStats fieldData;
private FilterCacheStats filterCache;
private IdCacheStats idCache;
private CompletionStats completion;
private SegmentsStats segments;
private PercolateStats percolate;
@ -63,7 +61,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
this.store = new StoreStats();
this.fieldData = new FieldDataStats();
this.filterCache = new FilterCacheStats();
this.idCache = new IdCacheStats();
this.completion = new CompletionStats();
this.segments = new SegmentsStats();
this.percolate = new PercolateStats();
@ -87,7 +84,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
store.add(shardCommonStats.store);
fieldData.add(shardCommonStats.fieldData);
filterCache.add(shardCommonStats.filterCache);
idCache.add(shardCommonStats.idCache);
completion.add(shardCommonStats.completion);
segments.add(shardCommonStats.segments);
percolate.add(shardCommonStats.percolate);
@ -125,10 +121,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
return filterCache;
}
public IdCacheStats getIdCache() {
return idCache;
}
public CompletionStats getCompletion() {
return completion;
}
@ -149,7 +141,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
store = StoreStats.readStoreStats(in);
fieldData = FieldDataStats.readFieldDataStats(in);
filterCache = FilterCacheStats.readFilterCacheStats(in);
idCache = IdCacheStats.readIdCacheStats(in);
completion = CompletionStats.readCompletionStats(in);
segments = SegmentsStats.readSegmentsStats(in);
percolate = PercolateStats.readPercolateStats(in);
@ -163,7 +154,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
store.writeTo(out);
fieldData.writeTo(out);
filterCache.writeTo(out);
idCache.writeTo(out);
completion.writeTo(out);
segments.writeTo(out);
percolate.writeTo(out);
@ -187,7 +177,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
store.toXContent(builder, params);
fieldData.toXContent(builder, params);
filterCache.toXContent(builder, params);
idCache.toXContent(builder, params);
completion.toXContent(builder, params);
segments.toXContent(builder, params);
percolate.toXContent(builder, params);

View File

@ -56,8 +56,8 @@ public class TransportClusterStatsAction extends TransportNodesOperationAction<C
TransportClusterStatsAction.ClusterStatsNodeRequest, ClusterStatsNodeResponse> {
private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.FilterCache, CommonStatsFlags.Flag.IdCache,
CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments, CommonStatsFlags.Flag.Percolate);
CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.FilterCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments,
CommonStatsFlags.Flag.Percolate);
private final NodeService nodeService;
private final IndicesService indicesService;

View File

@ -36,7 +36,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
*/
public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest> {
private String text;
private String[] text;
private String analyzer;
@ -61,11 +61,11 @@ public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest>
this.index(index);
}
public String text() {
public String[] text() {
return this.text;
}
public AnalyzeRequest text(String text) {
public AnalyzeRequest text(String... text) {
this.text = text;
return this;
}
@ -118,7 +118,7 @@ public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest>
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (text == null) {
if (text == null || text.length == 0) {
validationException = addValidationError("text is missing", validationException);
}
if (tokenFilters == null) {
@ -133,7 +133,7 @@ public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest>
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
text = in.readString();
text = in.readStringArray();
analyzer = in.readOptionalString();
tokenizer = in.readOptionalString();
tokenFilters = in.readStringArray();
@ -144,7 +144,7 @@ public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest>
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(text);
out.writeStringArray(text);
out.writeOptionalString(analyzer);
out.writeOptionalString(tokenizer);
out.writeStringArray(tokenFilters);

View File

@ -30,7 +30,7 @@ public class AnalyzeRequestBuilder extends SingleCustomOperationRequestBuilder<A
super(client, action, new AnalyzeRequest());
}
public AnalyzeRequestBuilder(ElasticsearchClient client, AnalyzeAction action, String index, String text) {
public AnalyzeRequestBuilder(ElasticsearchClient client, AnalyzeAction action, String index, String... text) {
super(client, action, new AnalyzeRequest(index).text(text));
}
@ -86,4 +86,12 @@ public class AnalyzeRequestBuilder extends SingleCustomOperationRequestBuilder<A
request.charFilters(charFilters);
return this;
}
/**
* Sets texts to analyze
*/
public AnalyzeRequestBuilder setText(String... texts) {
request.text(texts);
return this;
}
}
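
With text(...) now a varargs setter on the request and setText(...) added to the builder, a single analyze request can carry several texts. A minimal sketch of the request-level API; the index name and texts are illustrative:

import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;

public class AnalyzeRequestSketch {
    public static void main(String[] args) {
        AnalyzeRequest request = new AnalyzeRequest("my_index")
                .text("this is a test", "the second text");
        System.out.println(request.text().length); // 2 -- both texts travel in one request
    }
}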

View File

@ -25,6 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.ActionFilters;
@ -210,38 +211,43 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
List<AnalyzeResponse.AnalyzeToken> tokens = Lists.newArrayList();
TokenStream stream = null;
try {
stream = analyzer.tokenStream(field, request.text());
stream.reset();
CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
TypeAttribute type = stream.addAttribute(TypeAttribute.class);
int lastPosition = -1;
int lastOffset = 0;
for (String text : request.text()) {
try {
stream = analyzer.tokenStream(field, text);
stream.reset();
CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
TypeAttribute type = stream.addAttribute(TypeAttribute.class);
while (stream.incrementToken()) {
int increment = posIncr.getPositionIncrement();
if (increment > 0) {
lastPosition = lastPosition + increment;
}
tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(), lastOffset + offset.endOffset(), type.type()));
int position = -1;
while (stream.incrementToken()) {
int increment = posIncr.getPositionIncrement();
if (increment > 0) {
position = position + increment;
}
tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), position, offset.startOffset(), offset.endOffset(), type.type()));
}
stream.end();
} catch (IOException e) {
throw new ElasticsearchException("failed to analyze", e);
} finally {
if (stream != null) {
try {
stream.close();
} catch (IOException e) {
// ignore
}
}
if (closeAnalyzer) {
analyzer.close();
stream.end();
lastOffset += offset.endOffset();
lastPosition += posIncr.getPositionIncrement();
lastPosition += analyzer.getPositionIncrementGap(field);
lastOffset += analyzer.getOffsetGap(field);
} catch (IOException e) {
throw new ElasticsearchException("failed to analyze", e);
} finally {
IOUtils.closeWhileHandlingException(stream);
}
}
if (closeAnalyzer) {
analyzer.close();
}
return new AnalyzeResponse(tokens);
}
}
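
The loop above analyzes each text separately while carrying lastPosition and lastOffset forward, so tokens from later texts report positions and offsets as if all texts were consecutive values of a single field. The same bookkeeping can be illustrated with a plain Lucene analyzer; a hedged sketch assuming Lucene 5.x, with an illustrative field name and sample texts:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

public class MultiValueAnalyzeSketch {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new StandardAnalyzer();
        String field = "body";
        String[] texts = {"first value", "second value"};
        int lastPosition = -1;
        int lastOffset = 0;
        for (String text : texts) {
            try (TokenStream stream = analyzer.tokenStream(field, text)) {
                CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
                PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
                OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
                stream.reset();
                while (stream.incrementToken()) {
                    int increment = posIncr.getPositionIncrement();
                    if (increment > 0) {
                        lastPosition += increment;
                    }
                    // later values are shifted by the accumulated end offset of the earlier ones
                    System.out.println(term + " pos=" + lastPosition
                            + " start=" + (lastOffset + offset.startOffset())
                            + " end=" + (lastOffset + offset.endOffset()));
                }
                stream.end();
                lastOffset += offset.endOffset();
                lastPosition += posIncr.getPositionIncrement();
            }
            // account for the analyzer's per-field gaps between values, as the transport action does
            lastPosition += analyzer.getPositionIncrementGap(field);
            lastOffset += analyzer.getOffsetGap(field);
        }
        analyzer.close();
    }
}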

View File

@ -33,7 +33,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
private boolean filterCache = false;
private boolean fieldDataCache = false;
private boolean idCache = false;
private boolean recycler = false;
private boolean queryCache = false;
private String[] fields = null;
@ -82,10 +81,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
return this.fields;
}
public boolean idCache() {
return this.idCache;
}
public ClearIndicesCacheRequest recycler(boolean recycler) {
this.recycler = recycler;
return this;
@ -95,17 +90,11 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
return this.recycler;
}
public ClearIndicesCacheRequest idCache(boolean idCache) {
this.idCache = idCache;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
filterCache = in.readBoolean();
fieldDataCache = in.readBoolean();
idCache = in.readBoolean();
recycler = in.readBoolean();
fields = in.readStringArray();
queryCache = in.readBoolean();
@ -116,7 +105,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
super.writeTo(out);
out.writeBoolean(filterCache);
out.writeBoolean(fieldDataCache);
out.writeBoolean(idCache);
out.writeBoolean(recycler);
out.writeStringArrayNullable(fields);
out.writeBoolean(queryCache);

View File

@ -51,8 +51,4 @@ public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBu
return this;
}
public ClearIndicesCacheRequestBuilder setIdCache(boolean idCache) {
request.idCache(idCache);
return this;
}
}

View File

@ -34,7 +34,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
private boolean filterCache = false;
private boolean fieldDataCache = false;
private boolean idCache = false;
private boolean recycler;
private boolean queryCache = false;
@ -47,7 +46,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
super(shardId, request);
filterCache = request.filterCache();
fieldDataCache = request.fieldDataCache();
idCache = request.idCache();
fields = request.fields();
recycler = request.recycler();
queryCache = request.queryCache();
@ -65,10 +63,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
return this.fieldDataCache;
}
public boolean idCache() {
return this.idCache;
}
public boolean recycler() {
return this.recycler;
}
@ -77,17 +71,11 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
return this.fields;
}
public ShardClearIndicesCacheRequest waitForOperations(boolean waitForOperations) {
this.filterCache = waitForOperations;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
filterCache = in.readBoolean();
fieldDataCache = in.readBoolean();
idCache = in.readBoolean();
recycler = in.readBoolean();
fields = in.readStringArray();
queryCache = in.readBoolean();
@ -98,7 +86,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
super.writeTo(out);
out.writeBoolean(filterCache);
out.writeBoolean(fieldDataCache);
out.writeBoolean(idCache);
out.writeBoolean(recycler);
out.writeStringArrayNullable(fields);
out.writeBoolean(queryCache);

View File

@ -125,10 +125,6 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio
clearedAtLeastOne = true;
// cacheRecycler.clear();
}
if (request.idCache()) {
clearedAtLeastOne = true;
service.fieldData().clearField(ParentFieldMapper.NAME);
}
if (!clearedAtLeastOne) {
if (request.fields() != null && request.fields().length > 0) {
// only clear caches relating to the specified fields

View File

@ -30,8 +30,6 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda
private String source;
private boolean ignoreConflicts = false;
PutMappingClusterStateUpdateRequest() {
}
@ -53,13 +51,4 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda
this.source = source;
return this;
}
public boolean ignoreConflicts() {
return ignoreConflicts;
}
public PutMappingClusterStateUpdateRequest ignoreConflicts(boolean ignoreConflicts) {
this.ignoreConflicts = ignoreConflicts;
return this;
}
}

View File

@ -42,8 +42,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
* {@link org.elasticsearch.client.Requests#putMappingRequest(String...)}.
* <p/>
* <p>If the mappings already exist, the new mappings will be merged with the existing ones. If there are elements
* that can't be merged are detected, the request will be rejected unless the {@link #ignoreConflicts(boolean)}
* is set. In such a case, the duplicate mappings will be rejected.
* that can't be merged, the request will be rejected.
*
* @see org.elasticsearch.client.Requests#putMappingRequest(String...)
* @see org.elasticsearch.client.IndicesAdminClient#putMapping(PutMappingRequest)
@ -64,8 +63,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
private String source;
private boolean ignoreConflicts = false;
PutMappingRequest() {
}
@ -239,25 +236,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
return this;
}
/**
* If there is already a mapping definition registered against the type, then it will be merged. If there are
* elements that can't be merged are detected, the request will be rejected unless the
* {@link #ignoreConflicts(boolean)} is set. In such a case, the duplicate mappings will be rejected.
*/
public boolean ignoreConflicts() {
return ignoreConflicts;
}
/**
* If there is already a mapping definition registered against the type, then it will be merged. If there are
* elements that can't be merged are detected, the request will be rejected unless the
* {@link #ignoreConflicts(boolean)} is set. In such a case, the duplicate mappings will be rejected.
*/
public PutMappingRequest ignoreConflicts(boolean ignoreDuplicates) {
this.ignoreConflicts = ignoreDuplicates;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -266,7 +244,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
type = in.readOptionalString();
source = in.readString();
readTimeout(in);
ignoreConflicts = in.readBoolean();
}
@Override
@ -277,6 +254,5 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
out.writeOptionalString(type);
out.writeString(source);
writeTimeout(out);
out.writeBoolean(ignoreConflicts);
}
}

View File

@ -91,13 +91,4 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMapp
return this;
}
/**
* If there is already a mapping definition registered against the type, then it will be merged. If there are
* elements that can't be merged are detected, the request will be rejected unless the
* {@link #setIgnoreConflicts(boolean)} is set. In such a case, the duplicate mappings will be rejected.
*/
public PutMappingRequestBuilder setIgnoreConflicts(boolean ignoreConflicts) {
request.ignoreConflicts(ignoreConflicts);
return this;
}
}

View File

@ -70,7 +70,7 @@ public class TransportPutMappingAction extends TransportMasterNodeOperationActio
PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.indices(concreteIndices).type(request.type())
.source(request.source()).ignoreConflicts(request.ignoreConflicts());
.source(request.source());
metaDataMappingService.putMapping(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {

View File

@ -27,7 +27,6 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
@ -92,9 +91,6 @@ public class CommonStats implements Streamable, ToXContent {
case FilterCache:
filterCache = new FilterCacheStats();
break;
case IdCache:
idCache = new IdCacheStats();
break;
case FieldData:
fieldData = new FieldDataStats();
break;
@ -161,9 +157,6 @@ public class CommonStats implements Streamable, ToXContent {
case FilterCache:
filterCache = indexShard.filterCacheStats();
break;
case IdCache:
idCache = indexShard.idCacheStats();
break;
case FieldData:
fieldData = indexShard.fieldDataStats(flags.fieldDataFields());
break;
@ -224,9 +217,6 @@ public class CommonStats implements Streamable, ToXContent {
@Nullable
public FilterCacheStats filterCache;
@Nullable
public IdCacheStats idCache;
@Nullable
public FieldDataStats fieldData;
@ -333,15 +323,6 @@ public class CommonStats implements Streamable, ToXContent {
filterCache.add(stats.getFilterCache());
}
if (idCache == null) {
if (stats.getIdCache() != null) {
idCache = new IdCacheStats();
idCache.add(stats.getIdCache());
}
} else {
idCache.add(stats.getIdCache());
}
if (fieldData == null) {
if (stats.getFieldData() != null) {
fieldData = new FieldDataStats();
@ -458,11 +439,6 @@ public class CommonStats implements Streamable, ToXContent {
return this.filterCache;
}
@Nullable
public IdCacheStats getIdCache() {
return this.idCache;
}
@Nullable
public FieldDataStats getFieldData() {
return this.fieldData;
@ -511,7 +487,7 @@ public class CommonStats implements Streamable, ToXContent {
/**
* Utility method which computes total memory by adding
* FieldData, IdCache, Percolate, Segments (memory, index writer, version map)
* FieldData, Percolate, Segments (memory, index writer, version map)
*/
public ByteSizeValue getTotalMemory() {
long size = 0;
@ -521,9 +497,6 @@ public class CommonStats implements Streamable, ToXContent {
if (this.getFilterCache() != null) {
size += this.getFilterCache().getMemorySizeInBytes();
}
if (this.getIdCache() != null) {
size += this.getIdCache().getMemorySizeInBytes();
}
if (this.getPercolate() != null) {
size += this.getPercolate().getMemorySizeInBytes();
}
@ -568,9 +541,6 @@ public class CommonStats implements Streamable, ToXContent {
if (in.readBoolean()) {
filterCache = FilterCacheStats.readFilterCacheStats(in);
}
if (in.readBoolean()) {
idCache = IdCacheStats.readIdCacheStats(in);
}
if (in.readBoolean()) {
fieldData = FieldDataStats.readFieldDataStats(in);
}
@ -651,12 +621,6 @@ public class CommonStats implements Streamable, ToXContent {
out.writeBoolean(true);
filterCache.writeTo(out);
}
if (idCache == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
idCache.writeTo(out);
}
if (fieldData == null) {
out.writeBoolean(false);
} else {
@ -720,9 +684,6 @@ public class CommonStats implements Streamable, ToXContent {
if (filterCache != null) {
filterCache.toXContent(builder, params);
}
if (idCache != null) {
idCache.toXContent(builder, params);
}
if (fieldData != null) {
fieldData.toXContent(builder, params);
}

View File

@ -216,7 +216,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
Flush("flush"),
Refresh("refresh"),
FilterCache("filter_cache"),
IdCache("id_cache"),
FieldData("fielddata"),
Docs("docs"),
Warmer("warmer"),

View File

@ -175,15 +175,6 @@ public class IndicesStatsRequest extends BroadcastOperationRequest<IndicesStatsR
return flags.isSet(Flag.FilterCache);
}
public IndicesStatsRequest idCache(boolean idCache) {
flags.set(Flag.IdCache, idCache);
return this;
}
public boolean idCache() {
return flags.isSet(Flag.IdCache);
}
public IndicesStatsRequest fieldData(boolean fieldData) {
flags.set(Flag.FieldData, fieldData);
return this;

View File

@ -117,11 +117,6 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
return this;
}
public IndicesStatsRequestBuilder setIdCache(boolean idCache) {
request.idCache(idCache);
return this;
}
public IndicesStatsRequestBuilder setFieldData(boolean fieldData) {
request.fieldData(fieldData);
return this;

View File

@ -161,9 +161,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastOperationActi
if (request.request.filterCache()) {
flags.set(CommonStatsFlags.Flag.FilterCache);
}
if (request.request.idCache()) {
flags.set(CommonStatsFlags.Flag.IdCache);
}
if (request.request.fieldData()) {
flags.set(CommonStatsFlags.Flag.FieldData);
flags.fieldDataFields(request.request.fieldDataFields());

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.bulk;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionWriteResponse;
import org.elasticsearch.action.delete.DeleteResponse;
@ -27,6 +28,7 @@ import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
@ -44,26 +46,17 @@ public class BulkItemResponse implements Streamable {
private final String index;
private final String type;
private final String id;
private final String message;
private final Throwable cause;
private final RestStatus status;
public Failure(String index, String type, String id, Throwable t) {
this.index = index;
this.type = type;
this.id = id;
this.message = t.toString();
this.cause = t;
this.status = ExceptionsHelper.status(t);
}
public Failure(String index, String type, String id, String message, RestStatus status) {
this.index = index;
this.type = type;
this.id = id;
this.message = message;
this.status = status;
}
/**
* The index name of the action.
*/
@ -89,7 +82,7 @@ public class BulkItemResponse implements Streamable {
* The failure message.
*/
public String getMessage() {
return this.message;
return this.cause.toString();
}
/**
@ -98,6 +91,10 @@ public class BulkItemResponse implements Streamable {
public RestStatus getStatus() {
return this.status;
}
public Throwable getCause() {
return cause;
}
}
private int id;
@ -265,9 +262,8 @@ public class BulkItemResponse implements Streamable {
String fIndex = in.readString();
String fType = in.readString();
String fId = in.readOptionalString();
String fMessage = in.readString();
RestStatus status = RestStatus.readFrom(in);
failure = new Failure(fIndex, fType, fId, fMessage, status);
Throwable throwable = in.readThrowable();
failure = new Failure(fIndex, fType, fId, throwable);
}
}
@ -295,8 +291,7 @@ public class BulkItemResponse implements Streamable {
out.writeString(failure.getIndex());
out.writeString(failure.getType());
out.writeOptionalString(failure.getId());
out.writeString(failure.getMessage());
RestStatus.writeTo(out, failure.getStatus());
out.writeThrowable(failure.getCause());
}
}
}
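
A Failure now keeps the original Throwable and derives both its message and its REST status from it, rather than storing precomputed strings. A small sketch; the index, type and id values are illustrative:

import org.elasticsearch.action.bulk.BulkItemResponse;

public class BulkFailureSketch {
    public static void main(String[] args) {
        BulkItemResponse.Failure failure = new BulkItemResponse.Failure(
                "my_index", "my_type", "1", new IllegalArgumentException("routing is required for this item"));
        System.out.println(failure.getMessage()); // toString() of the cause
        System.out.println(failure.getStatus());  // derived from the cause via ExceptionsHelper.status(...)
        System.out.println(failure.getCause() instanceof IllegalArgumentException); // true
    }
}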

View File

@ -284,7 +284,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
MappingMetaData mappingMd = clusterState.metaData().index(concreteIndex).mappingOrDefault(updateRequest.type());
if (mappingMd != null && mappingMd.routing().required() && updateRequest.routing() == null) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(),
updateRequest.id(), "routing is required for this item", RestStatus.BAD_REQUEST);
updateRequest.id(), new IllegalArgumentException("routing is required for this item"));
responses.set(i, new BulkItemResponse(i, updateRequest.type(), failure));
continue;
}
@ -328,21 +328,19 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
@Override
public void onFailure(Throwable e) {
// create failures for all relevant requests
String message = ExceptionsHelper.detailedMessage(e);
RestStatus status = ExceptionsHelper.status(e);
for (BulkItemRequest request : requests) {
if (request.request() instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), indexRequest.opType().toString().toLowerCase(Locale.ENGLISH),
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(indexRequest.index()), indexRequest.type(), indexRequest.id(), message, status)));
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(indexRequest.index()), indexRequest.type(), indexRequest.id(), e)));
} else if (request.request() instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), "delete",
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(deleteRequest.index()), deleteRequest.type(), deleteRequest.id(), message, status)));
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(deleteRequest.index()), deleteRequest.type(), deleteRequest.id(), e)));
} else if (request.request() instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), "update",
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(updateRequest.index()), updateRequest.type(), updateRequest.id(), message, status)));
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(updateRequest.index()), updateRequest.type(), updateRequest.id(), e)));
}
}
if (counter.decrementAndGet() == 0) {

View File

@ -116,7 +116,7 @@ public class PercolateResponse extends BroadcastOperationResponse implements Ite
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.TOOK, tookInMillis);
RestActions.buildBroadcastShardsHeader(builder, this);
RestActions.buildBroadcastShardsHeader(builder, params, this);
builder.field(Fields.TOTAL, count);
if (matches != null) {

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.action.percolate;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.get.GetRequest;

View File

@ -20,6 +20,8 @@
package org.elasticsearch.action.search;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexException;
@ -92,8 +94,8 @@ public class SearchPhaseExecutionException extends ElasticsearchException {
builder.field("grouped", group); // notify that it's grouped
builder.field("failed_shards");
builder.startArray();
ShardSearchFailure[] failures = params.paramAsBoolean("group_shard_failures", true) ? groupBy(shardFailures) : shardFailures;
for (ShardSearchFailure failure : failures) {
ShardOperationFailedException[] failures = params.paramAsBoolean("group_shard_failures", true) ? ExceptionsHelper.groupBy(shardFailures) : shardFailures;
for (ShardOperationFailedException failure : failures) {
builder.startObject();
failure.toXContent(builder, params);
builder.endObject();
@ -103,25 +105,11 @@ public class SearchPhaseExecutionException extends ElasticsearchException {
}
private ShardSearchFailure[] groupBy(ShardSearchFailure[] failures) {
List<ShardSearchFailure> uniqueFailures = new ArrayList<>();
Set<GroupBy> reasons = new HashSet<>();
for (ShardSearchFailure failure : failures) {
GroupBy reason = new GroupBy(failure.getCause());
if (reasons.contains(reason) == false) {
reasons.add(reason);
uniqueFailures.add(failure);
}
}
return uniqueFailures.toArray(new ShardSearchFailure[0]);
}
@Override
public ElasticsearchException[] guessRootCauses() {
ShardSearchFailure[] failures = groupBy(shardFailures);
ShardOperationFailedException[] failures = ExceptionsHelper.groupBy(shardFailures);
List<ElasticsearchException> rootCauses = new ArrayList<>(failures.length);
for (ShardSearchFailure failure : failures) {
for (ShardOperationFailedException failure : failures) {
ElasticsearchException[] guessRootCauses = ElasticsearchException.guessRootCauses(failure.getCause());
rootCauses.addAll(Arrays.asList(guessRootCauses));
}
@ -132,42 +120,4 @@ public class SearchPhaseExecutionException extends ElasticsearchException {
public String toString() {
return buildMessage(phaseName, getMessage(), shardFailures);
}
static class GroupBy {
final String reason;
final Index index;
final Class<? extends Throwable> causeType;
public GroupBy(Throwable t) {
if (t instanceof IndexException) {
index = ((IndexException) t).index();
} else {
index = null;
}
reason = t.getMessage();
causeType = t.getClass();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
GroupBy groupBy = (GroupBy) o;
if (!causeType.equals(groupBy.causeType)) return false;
if (index != null ? !index.equals(groupBy.index) : groupBy.index != null) return false;
if (reason != null ? !reason.equals(groupBy.reason) : groupBy.reason != null) return false;
return true;
}
@Override
public int hashCode() {
int result = reason != null ? reason.hashCode() : 0;
result = 31 * result + (index != null ? index.hashCode() : 0);
result = 31 * result + causeType.hashCode();
return result;
}
}
}

View File

@ -177,7 +177,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
if (isTerminatedEarly() != null) {
builder.field(Fields.TERMINATED_EARLY, isTerminatedEarly());
}
RestActions.buildBroadcastShardsHeader(builder, getTotalShards(), getSuccessfulShards(), getFailedShards(), getShardFailures());
RestActions.buildBroadcastShardsHeader(builder, params, getTotalShards(), getSuccessfulShards(), getFailedShards(), getShardFailures());
internalResponse.toXContent(builder, params);
return builder;
}

View File

@ -40,7 +40,7 @@ import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
/**
* Represents a failure to search on a specific shard.
*/
public class ShardSearchFailure implements ShardOperationFailedException, ToXContent {
public class ShardSearchFailure implements ShardOperationFailedException {
public static final ShardSearchFailure[] EMPTY_ARRAY = new ShardSearchFailure[0];
@ -172,6 +172,7 @@ public class ShardSearchFailure implements ShardOperationFailedException, ToXCon
return builder;
}
@Override
public Throwable getCause() {
return cause;
}

View File

@ -24,6 +24,8 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.IndexShardException;
import org.elasticsearch.rest.RestStatus;
@ -81,6 +83,11 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile
return status;
}
@Override
public Throwable getCause() {
return reason;
}
public static DefaultShardOperationFailedException readShardOperationFailed(StreamInput in) throws IOException {
DefaultShardOperationFailedException exp = new DefaultShardOperationFailedException();
exp.readFrom(in);
@ -114,4 +121,19 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile
public String toString() {
return "[" + index + "][" + shardId + "] failed, reason [" + reason() + "]";
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("shard", shardId());
builder.field("index", index());
builder.field("status", status.name());
if (reason != null) {
builder.field("reason");
builder.startObject();
ElasticsearchException.toXContent(builder, params, reason);
builder.endObject();
}
return builder;
}
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.action.support.single.shard;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.NoShardAvailableActionException;

View File

@ -28,7 +28,7 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BoostAttribute;
import org.apache.lucene.util.*;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
import java.util.Arrays;
@ -125,9 +125,8 @@ public final class TermVectorsFields extends Fields {
* @param termVectors Stores the actual term vectors as a {@link BytesRef}.
*/
public TermVectorsFields(BytesReference headerRef, BytesReference termVectors) throws IOException {
BytesStreamInput header = new BytesStreamInput(headerRef);
StreamInput header = StreamInput.wrap(headerRef.toBytesArray());
fieldMap = new ObjectLongHashMap<>();
// here we read the header to fill the field offset map
String headerString = header.readString();
assert headerString.equals("TV");
@ -185,7 +184,7 @@ public final class TermVectorsFields extends Fields {
private final class TermVector extends Terms {
private final BytesStreamInput perFieldTermVectorInput;
private final StreamInput perFieldTermVectorInput;
private final long readOffset;
private long numTerms;
@ -197,7 +196,7 @@ public final class TermVectorsFields extends Fields {
private int docCount;
public TermVector(BytesReference termVectors, long readOffset) throws IOException {
this.perFieldTermVectorInput = new BytesStreamInput(termVectors);
this.perFieldTermVectorInput = StreamInput.wrap(termVectors.toBytesArray());
this.readOffset = readOffset;
reset();
}
@ -270,7 +269,7 @@ public final class TermVectorsFields extends Fields {
}
}
private void writeInfos(final BytesStreamInput input) throws IOException {
private void writeInfos(final StreamInput input) throws IOException {
for (int i = 0; i < freq; i++) {
if (hasPositions) {
positions[i] = input.readVInt();
@ -484,7 +483,7 @@ public final class TermVectorsFields extends Fields {
// the writer writes a 0 for -1 or value +1 and accordingly we have to
// subtract 1 again
// adds one to mock not existing term freq
int readPotentiallyNegativeVInt(BytesStreamInput stream) throws IOException {
int readPotentiallyNegativeVInt(StreamInput stream) throws IOException {
return stream.readVInt() - 1;
}
@ -492,7 +491,7 @@ public final class TermVectorsFields extends Fields {
// case, the writer writes a 0 for -1 or value +1 and accordingly we have to
// subtract 1 again
// adds one to mock not existing term freq
long readPotentiallyNegativeVLong(BytesStreamInput stream) throws IOException {
long readPotentiallyNegativeVLong(StreamInput stream) throws IOException {
return stream.readVLong() - 1;
}
}
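
Both hunks above replace BytesStreamInput with StreamInput.wrap over the underlying bytes. A minimal round-trip sketch of that pattern; the written values are illustrative ("TV" matches the header marker asserted above):

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

public class StreamWrapSketch {
    public static void main(String[] args) throws Exception {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeString("TV");
        out.writeVInt(42);
        StreamInput in = StreamInput.wrap(out.bytes().toBytesArray());
        System.out.println(in.readString()); // "TV"
        System.out.println(in.readVInt());   // 42
    }
}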

View File

@ -611,6 +611,12 @@ public interface IndicesAdminClient extends ElasticsearchClient {
*/
AnalyzeRequestBuilder prepareAnalyze(String text);
/**
* Analyze text/texts.
*
*/
AnalyzeRequestBuilder prepareAnalyze();
/**
* Puts an index template.
*/

View File

@ -1497,6 +1497,11 @@ public abstract class AbstractClient extends AbstractComponent implements Client
return new AnalyzeRequestBuilder(this, AnalyzeAction.INSTANCE, null, text);
}
@Override
public AnalyzeRequestBuilder prepareAnalyze() {
return new AnalyzeRequestBuilder(this, AnalyzeAction.INSTANCE);
}
@Override
public ActionFuture<PutIndexTemplateResponse> putTemplate(final PutIndexTemplateRequest request) {
return execute(PutIndexTemplateAction.INSTANCE, request);
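
Combined with the varargs setText(...) added to AnalyzeRequestBuilder earlier in this commit, the new no-argument prepareAnalyze() lets a caller analyze several texts in one call. A hedged sketch, assuming an already-connected Client instance:

import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
import org.elasticsearch.client.Client;

public class PrepareAnalyzeSketch {
    // client construction (node client or transport client) is not shown here
    static AnalyzeResponse analyzeBoth(Client client) {
        return client.admin().indices()
                .prepareAnalyze()
                .setText("this is a test", "the second text")
                .execute().actionGet();
    }
}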

View File

@ -38,7 +38,6 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -675,7 +674,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
* @param localNode used to set the local node in the cluster state.
*/
public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException {
return readFrom(new BytesStreamInput(data), localNode);
return readFrom(StreamInput.wrap(data), localNode);
}
/**

View File

@ -382,8 +382,8 @@ public class MetaDataMappingService extends AbstractComponent {
if (existingMapper != null) {
// first, simulate
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true);
// if we have conflicts, and we are not supposed to ignore them, throw an exception
if (!request.ignoreConflicts() && mergeResult.hasConflicts()) {
// if we have conflicts, throw an exception
if (mergeResult.hasConflicts()) {
throw new MergeMappingException(mergeResult.buildConflicts());
}
}

View File

@ -23,20 +23,21 @@ import com.carrotsearch.hppc.ObjectObjectAssociativeContainer;
import com.carrotsearch.hppc.ObjectObjectHashMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
*
*/
public class ContextHolder {
public class ContextAndHeaderHolder<T> implements HasContextAndHeaders {
private ObjectObjectHashMap<Object, Object> context;
protected Map<String, Object> headers;
/**
* Attaches the given value to the context.
*
* @return The previous value that was associated with the given key in the context, or
* {@code null} if there was none.
*/
@SuppressWarnings("unchecked")
@Override
public final synchronized <V> V putInContext(Object key, Object value) {
if (context == null) {
context = new ObjectObjectHashMap<>(2);
@ -44,9 +45,7 @@ public class ContextHolder {
return (V) context.put(key, value);
}
/**
* Attaches the given values to the context
*/
@Override
public final synchronized void putAllInContext(ObjectObjectAssociativeContainer<Object, Object> map) {
if (map == null) {
return;
@ -58,72 +57,98 @@ public class ContextHolder {
}
}
/**
* @return The context value that is associated with the given key
*
* @see #putInContext(Object, Object)
*/
@SuppressWarnings("unchecked")
@Override
public final synchronized <V> V getFromContext(Object key) {
return context != null ? (V) context.get(key) : null;
}
/**
* @param defaultValue The default value that should be returned for the given key, if no
* value is currently associated with it.
*
* @return The value that is associated with the given key in the context
*
* @see #putInContext(Object, Object)
*/
@SuppressWarnings("unchecked")
@Override
public final synchronized <V> V getFromContext(Object key, V defaultValue) {
V value = getFromContext(key);
return value == null ? defaultValue : value;
}
/**
* Checks if the context contains an entry with the given key
*/
@Override
public final synchronized boolean hasInContext(Object key) {
return context != null && context.containsKey(key);
}
/**
* @return The number of values attached in the context.
*/
@Override
public final synchronized int contextSize() {
return context != null ? context.size() : 0;
}
/**
* Checks if the context is empty.
*/
@Override
public final synchronized boolean isContextEmpty() {
return context == null || context.isEmpty();
}
/**
* @return A safe immutable copy of the current context.
*/
@Override
public synchronized ImmutableOpenMap<Object, Object> getContext() {
return context != null ? ImmutableOpenMap.copyOf(context) : ImmutableOpenMap.of();
}
/**
* Copies the context from the given context holder to this context holder. Any shared keys between
* the two contexts will be overridden by the given context holder.
*/
public synchronized void copyContextFrom(ContextHolder other) {
@Override
public synchronized void copyContextFrom(HasContext other) {
if (other == null) {
return;
}
synchronized (other) {
if (other.context == null) {
ImmutableOpenMap<Object, Object> otherContext = other.getContext();
if (otherContext == null) {
return;
}
if (context == null) {
context = new ObjectObjectHashMap<>(other.context);
ObjectObjectHashMap<Object, Object> map = new ObjectObjectHashMap<>(other.getContext().size());
map.putAll(otherContext);
this.context = map;
} else {
context.putAll(other.context);
context.putAll(otherContext);
}
}
}
@SuppressWarnings("unchecked")
@Override
public final T putHeader(String key, Object value) {
if (headers == null) {
headers = new HashMap<>();
}
headers.put(key, value);
return (T) this;
}
@SuppressWarnings("unchecked")
@Override
public final <V> V getHeader(String key) {
return headers != null ? (V) headers.get(key) : null;
}
@Override
public final boolean hasHeader(String key) {
return headers != null && headers.containsKey(key);
}
@Override
public Set<String> getHeaders() {
return headers != null ? headers.keySet() : Collections.<String>emptySet();
}
@Override
public void copyHeadersFrom(HasHeaders from) {
if (from != null && from.getHeaders() != null && !from.getHeaders().isEmpty()) {
for (String headerName : from.getHeaders()) {
putHeader(headerName, from.getHeader(headerName));
}
}
}
@Override
public void copyContextAndHeadersFrom(HasContextAndHeaders other) {
copyContextFrom(other);
copyHeadersFrom(other);
}
}
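A minimal usage sketch for the new holder; `TracedRequest` is a hypothetical subclass, and the `org.elasticsearch.common` package is assumed from the `HasContext` interface added below.

[source,java]
--------------------------------------------------
import org.elasticsearch.common.ContextAndHeaderHolder;

public class TracedRequest extends ContextAndHeaderHolder<TracedRequest> {

    public static void main(String[] args) {
        TracedRequest request = new TracedRequest();
        request.putHeader("X-Opaque-Id", "trace-42");        // attach a header
        request.putInContext("search.source", "dashboard");  // attach a context value

        TracedRequest copy = new TracedRequest();
        copy.copyContextAndHeadersFrom(request);              // copies both maps

        assert copy.hasHeader("X-Opaque-Id");
        assert "dashboard".equals(copy.getFromContext("search.source"));
    }
}
--------------------------------------------------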

View File

@ -0,0 +1,82 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import com.carrotsearch.hppc.ObjectObjectAssociativeContainer;
import org.elasticsearch.common.collect.ImmutableOpenMap;
public interface HasContext {
/**
* Attaches the given value to the context.
*
* @return The previous value that was associated with the given key in the context, or
* {@code null} if there was none.
*/
<V> V putInContext(Object key, Object value);
/**
* Attaches the given values to the context
*/
void putAllInContext(ObjectObjectAssociativeContainer<Object, Object> map);
/**
* @return The context value that is associated with the given key
*
* @see #putInContext(Object, Object)
*/
<V> V getFromContext(Object key);
/**
* @param defaultValue The default value that should be returned for the given key, if no
* value is currently associated with it.
*
* @return The value that is associated with the given key in the context
*
* @see #putInContext(Object, Object)
*/
<V> V getFromContext(Object key, V defaultValue);
/**
* Checks if the context contains an entry with the given key
*/
boolean hasInContext(Object key);
/**
* @return The number of values attached in the context.
*/
int contextSize();
/**
* Checks if the context is empty.
*/
boolean isContextEmpty();
/**
* @return A safe immutable copy of the current context.
*/
ImmutableOpenMap<Object, Object> getContext();
/**
* Copies the context from the given context holder to this context holder. Any shared keys between
* the two contexts will be overridden by the given context holder.
*/
void copyContextFrom(HasContext other);
}

View File

@ -17,22 +17,17 @@
* under the License.
*/
package org.elasticsearch.index.translog;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.util.BigArrays;
import java.io.IOException;
package org.elasticsearch.common;
/**
*
* marker interface
*/
public class FsSimpleTranslogTests extends TranslogTests {
public interface HasContextAndHeaders extends HasContext, HasHeaders {
@Override
protected Translog create() throws IOException {
return new Translog(shardId,
ImmutableSettings.settingsBuilder().put("index.translog.fs.type", TranslogFile.Type.SIMPLE.name()).build(),
BigArrays.NON_RECYCLING_INSTANCE, translogDir);
}
}
/**
* copies over the context and the headers
* @param other another object supporting headers and context
*/
void copyContextAndHeadersFrom(HasContextAndHeaders other);
}

View File

@ -17,21 +17,22 @@
* under the License.
*/
package org.elasticsearch.common.lucene.docset;
package org.elasticsearch.common;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
import java.util.Set;
/**
* A holder for a {@link DocIdSet} and the {@link LeafReaderContext} it is associated with.
*
*/
public class ContextDocIdSet {
public interface HasHeaders {
public final LeafReaderContext context;
public final DocIdSet docSet;
<V> V putHeader(String key, V value);
public ContextDocIdSet(LeafReaderContext context, DocIdSet docSet) {
this.context = context;
this.docSet = docSet;
}
<V> V getHeader(String key);
boolean hasHeader(String key);
Set<String> getHeaders();
void copyHeadersFrom(HasHeaders from);
}

View File

@ -22,7 +22,6 @@ package org.elasticsearch.common.bytes;
import com.google.common.base.Charsets;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.Channels;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
@ -96,7 +95,7 @@ public class BytesArray implements BytesReference {
@Override
public StreamInput streamInput() {
return new BytesStreamInput(bytes, offset, length);
return StreamInput.wrap(bytes, offset, length);
}
@Override

View File

@ -409,19 +409,24 @@ public class GeoUtils {
return point.reset(lat, lon);
} else if(parser.currentToken() == Token.VALUE_STRING) {
String data = parser.text();
int comma = data.indexOf(',');
if(comma > 0) {
lat = Double.parseDouble(data.substring(0, comma).trim());
lon = Double.parseDouble(data.substring(comma + 1).trim());
return point.reset(lat, lon);
} else {
return point.resetFromGeoHash(data);
}
return parseGeoPoint(data, point);
} else {
throw new ElasticsearchParseException("geo_point expected");
}
}
/** parse a {@link GeoPoint} from a String */
public static GeoPoint parseGeoPoint(String data, GeoPoint point) {
int comma = data.indexOf(',');
if(comma > 0) {
double lat = Double.parseDouble(data.substring(0, comma).trim());
double lon = Double.parseDouble(data.substring(comma + 1).trim());
return point.reset(lat, lon);
} else {
return point.resetFromGeoHash(data);
}
}
private GeoUtils() {
}
}
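The extracted helper can be called directly on strings; a small sketch assuming `GeoPoint`'s no-argument constructor and its `lat()`/`lon()` accessors, covering both forms handled by the method above.

[source,java]
--------------------------------------------------
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;

public class GeoPointParsing {
    public static void main(String[] args) {
        // "lat,lon" form
        GeoPoint fromLatLon = GeoUtils.parseGeoPoint("41.12, -71.34", new GeoPoint());
        // geohash form
        GeoPoint fromGeohash = GeoUtils.parseGeoPoint("drm3btev3e86", new GeoPoint());

        System.out.println(fromLatLon.lat() + " / " + fromLatLon.lon());
        System.out.println(fromGeohash.lat() + " / " + fromGeohash.lon());
    }
}
--------------------------------------------------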

View File

@ -72,7 +72,7 @@ public class ByteBufferStreamInput extends StreamInput {
@Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
if (buffer.remaining() > len) {
if (buffer.remaining() < len) {
throw new EOFException();
}
buffer.get(b, offset, len);
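The old guard threw `EOFException` exactly when enough bytes remained; with the corrected check a read that asks for more bytes than are left fails fast instead. A hedged sketch, assuming the single-`ByteBuffer` constructor implied by the class name.

[source,java]
--------------------------------------------------
import org.elasticsearch.common.io.stream.ByteBufferStreamInput;

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;

public class ByteBufferReadCheck {
    public static void main(String[] args) throws IOException {
        ByteBufferStreamInput in = new ByteBufferStreamInput(ByteBuffer.wrap(new byte[]{1, 2, 3}));
        byte[] dest = new byte[8];
        try {
            in.readBytes(dest, 0, dest.length); // asks for more bytes than remain
        } catch (EOFException expected) {
            System.out.println("not enough bytes left: " + expected);
        }
    }
}
--------------------------------------------------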

View File

@ -1,154 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.io.stream;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import java.io.EOFException;
import java.io.IOException;
/**
*
*/
public class BytesStreamInput extends StreamInput {
protected byte buf[];
protected int pos;
protected int end;
public BytesStreamInput(BytesReference bytes) {
if (!bytes.hasArray()) {
bytes = bytes.toBytesArray();
}
this.buf = bytes.array();
this.pos = bytes.arrayOffset();
this.end = pos + bytes.length();
}
public BytesStreamInput(byte buf[]) {
this(buf, 0, buf.length);
}
public BytesStreamInput(byte buf[], int offset, int length) {
this.buf = buf;
this.pos = offset;
this.end = offset + length;
}
@Override
public BytesReference readBytesReference(int length) throws IOException {
BytesArray bytes = new BytesArray(buf, pos, length);
pos += length;
return bytes;
}
@Override
public BytesRef readBytesRef(int length) throws IOException {
BytesRef bytes = new BytesRef(buf, pos, length);
pos += length;
return bytes;
}
@Override
public long skip(long n) throws IOException {
if (pos + n > end) {
n = end - pos;
}
if (n < 0) {
return 0;
}
pos += n;
return n;
}
public int position() {
return this.pos;
}
@Override
public int read() throws IOException {
return (pos < end) ? (buf[pos++] & 0xff) : -1;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (b == null) {
throw new NullPointerException();
} else if (off < 0 || len < 0 || len > b.length - off) {
throw new IndexOutOfBoundsException();
}
if (pos >= end) {
return -1;
}
if (pos + len > end) {
len = end - pos;
}
if (len <= 0) {
return 0;
}
System.arraycopy(buf, pos, b, off, len);
pos += len;
return len;
}
public byte[] underlyingBuffer() {
return buf;
}
@Override
public byte readByte() throws IOException {
if (pos >= end) {
throw new EOFException();
}
return buf[pos++];
}
@Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
if (len == 0) {
return;
}
if (pos >= end) {
throw new EOFException();
}
if (pos + len > end) {
len = end - pos;
}
if (len <= 0) {
throw new EOFException();
}
System.arraycopy(buf, pos, b, offset, len);
pos += len;
}
@Override
public void reset() throws IOException {
pos = 0;
}
@Override
public void close() throws IOException {
// nothing to do here...
}
}

View File

@ -1,61 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.io.stream;
import java.io.IOException;
/**
* A non-threadsafe StreamOutput that doesn't actually write the bytes to any
* stream, it only keeps track of how many bytes have been written
*/
public final class NoopStreamOutput extends StreamOutput {
private int count = 0;
/** Retrieve the number of bytes that have been written */
public int getCount() {
return count;
}
@Override
public void writeByte(byte b) throws IOException {
count++;
}
@Override
public void writeBytes(byte[] b, int offset, int length) throws IOException {
count += length;
}
@Override
public void flush() throws IOException {
// no-op
}
@Override
public void close() throws IOException {
// nothing to close
}
@Override
public void reset() throws IOException {
count = 0;
}
}

View File

@ -30,6 +30,7 @@ import org.elasticsearch.common.text.StringAndBytesText;
import org.elasticsearch.common.text.Text;
import org.joda.time.DateTime;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
@ -499,4 +500,19 @@ public abstract class StreamInput extends InputStream {
throw new IOException("failed to deserialize exception", e);
}
}
public static StreamInput wrap(BytesReference reference) {
if (reference.hasArray() == false) {
reference = reference.toBytesArray();
}
return wrap(reference.array(), reference.arrayOffset(), reference.length());
}
public static StreamInput wrap(byte[] bytes) {
return wrap(bytes, 0, bytes.length);
}
public static StreamInput wrap(byte[] bytes, int offset, int length) {
return new InputStreamStreamInput(new ByteArrayInputStream(bytes, offset, length));
}
}
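A minimal round-trip sketch using the new static `wrap(...)` factories that replace `new BytesStreamInput(...)` throughout this change; `BytesStreamOutput` and the `readVInt`/`readString` calls are existing stream APIs used only for illustration.

[source,java]
--------------------------------------------------
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;

public class WrapRoundTrip {
    public static void main(String[] args) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeVInt(42);
        out.writeString("synced_flush");

        // wrap(BytesReference) and wrap(byte[]) both delegate to wrap(byte[], offset, length)
        try (StreamInput in = StreamInput.wrap(out.bytes())) {
            int number = in.readVInt();
            String text = in.readString();
            System.out.println(number + " " + text);
        }
    }
}
--------------------------------------------------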

View File

@ -1,100 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.docset;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.RamUsageEstimator;
import java.io.IOException;
/**
* A {@link DocIdSet} that matches all docs up to a {@code maxDoc}.
*/
public class AllDocIdSet extends DocIdSet {
private final int maxDoc;
public AllDocIdSet(int maxDoc) {
this.maxDoc = maxDoc;
}
/**
* Does not go to the reader and ask for data, so can be cached.
*/
@Override
public boolean isCacheable() {
return true;
}
@Override
public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_INT;
}
@Override
public DocIdSetIterator iterator() throws IOException {
return new Iterator(maxDoc);
}
@Override
public Bits bits() throws IOException {
return new Bits.MatchAllBits(maxDoc);
}
public static final class Iterator extends DocIdSetIterator {
private final int maxDoc;
private int doc = -1;
public Iterator(int maxDoc) {
this.maxDoc = maxDoc;
}
@Override
public int docID() {
return doc;
}
@Override
public int nextDoc() throws IOException {
if (++doc < maxDoc) {
return doc;
}
return doc = NO_MORE_DOCS;
}
@Override
public int advance(int target) throws IOException {
doc = target;
if (doc < maxDoc) {
return doc;
}
return doc = NO_MORE_DOCS;
}
@Override
public long cost() {
return maxDoc;
}
}
}

View File

@ -1,67 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.docset;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FilteredDocIdSetIterator;
import org.apache.lucene.util.Bits;
/**
* A {@link Bits} based iterator.
*/
public class BitsDocIdSetIterator extends MatchDocIdSetIterator {
private final Bits bits;
public BitsDocIdSetIterator(Bits bits) {
super(bits.length());
this.bits = bits;
}
public BitsDocIdSetIterator(int maxDoc, Bits bits) {
super(maxDoc);
this.bits = bits;
}
@Override
protected boolean matchDoc(int doc) {
return bits.get(doc);
}
public static class FilteredIterator extends FilteredDocIdSetIterator {
private final Bits bits;
FilteredIterator(DocIdSetIterator innerIter, Bits bits) {
super(innerIter);
this.bits = bits;
}
@Override
protected boolean match(int doc) {
return bits.get(doc);
}
}
@Override
public long cost() {
return this.bits.length();
}
}

View File

@ -19,17 +19,11 @@
package org.elasticsearch.common.lucene.docset;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.RoaringDocIdSet;
import org.apache.lucene.util.SparseFixedBitSet;
import org.elasticsearch.common.Nullable;
import java.io.IOException;
@ -38,13 +32,6 @@ import java.io.IOException;
*/
public class DocIdSets {
/**
* Return the size of the doc id set, plus a reference to it.
*/
public static long sizeInBytes(DocIdSet docIdSet) {
return RamUsageEstimator.NUM_BYTES_OBJECT_REF + docIdSet.ramBytesUsed();
}
/**
* Is it an empty {@link DocIdSet}?
*/
@ -52,59 +39,6 @@ public class DocIdSets {
return set == null || set == DocIdSet.EMPTY;
}
/**
* Converts to a cacheable {@link DocIdSet}
* <p/>
* This never returns <code>null</code>.
*/
public static DocIdSet toCacheable(LeafReader reader, @Nullable DocIdSet set) throws IOException {
if (set == null || set == DocIdSet.EMPTY) {
return DocIdSet.EMPTY;
}
final DocIdSetIterator it = set.iterator();
if (it == null) {
return DocIdSet.EMPTY;
}
final int firstDoc = it.nextDoc();
if (firstDoc == DocIdSetIterator.NO_MORE_DOCS) {
return DocIdSet.EMPTY;
}
if (set instanceof BitDocIdSet) {
return set;
}
final RoaringDocIdSet.Builder builder = new RoaringDocIdSet.Builder(reader.maxDoc());
builder.add(firstDoc);
for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
builder.add(doc);
}
return builder.build();
}
/**
* Get a build a {@link Bits} instance that will match all documents
* contained in {@code set}. Note that this is a potentially heavy
* operation as this might require to consume an iterator of this set
* entirely and to load it into a {@link BitSet}. Prefer using
* {@link #asSequentialAccessBits} if you only need to consume the
* {@link Bits} once and in order.
*/
public static Bits toSafeBits(int maxDoc, @Nullable DocIdSet set) throws IOException {
if (set == null) {
return new Bits.MatchNoBits(maxDoc);
}
Bits bits = set.bits();
if (bits != null) {
return bits;
}
DocIdSetIterator iterator = set.iterator();
if (iterator == null) {
return new Bits.MatchNoBits(maxDoc);
}
return toBitSet(iterator, maxDoc);
}
/**
* Given a {@link Scorer}, return a {@link Bits} instance that will match
* all documents contained in the set. Note that the returned {@link Bits}
@ -168,18 +102,4 @@ public class DocIdSets {
};
}
/**
* Creates a {@link BitSet} from an iterator.
*/
public static BitSet toBitSet(DocIdSetIterator iterator, int numBits) throws IOException {
BitDocIdSet.Builder builder = new BitDocIdSet.Builder(numBits);
builder.or(iterator);
BitDocIdSet result = builder.build();
if (result != null) {
return result.bits();
} else {
return new SparseFixedBitSet(numBits);
}
}
}

View File

@ -1,68 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.docset;
import org.apache.lucene.search.DocIdSetIterator;
import java.io.IOException;
/**
*/
public abstract class MatchDocIdSetIterator extends DocIdSetIterator {
private final int maxDoc;
private int doc = -1;
public MatchDocIdSetIterator(int maxDoc) {
this.maxDoc = maxDoc;
}
protected abstract boolean matchDoc(int doc);
@Override
public int docID() {
return doc;
}
@Override
public int nextDoc() throws IOException {
do {
doc++;
if (doc >= maxDoc) {
return doc = NO_MORE_DOCS;
}
} while (!matchDoc(doc));
return doc;
}
@Override
public int advance(int target) throws IOException {
if (target >= maxDoc) {
return doc = NO_MORE_DOCS;
}
doc = target;
while (!matchDoc(doc)) {
doc++;
if (doc >= maxDoc) {
return doc = NO_MORE_DOCS;
}
}
return doc;
}
}

View File

@ -21,7 +21,7 @@ package org.elasticsearch.common.settings.loader;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
import java.util.Map;
@ -53,7 +53,7 @@ public class PropertiesSettingsLoader implements SettingsLoader {
@Override
public Map<String, String> load(byte[] source) throws IOException {
Properties props = new Properties();
BytesStreamInput stream = new BytesStreamInput(source);
StreamInput stream = StreamInput.wrap(source);
try {
props.load(stream);
Map<String, String> result = newHashMap();

View File

@ -20,7 +20,9 @@
package org.elasticsearch.common.xcontent;
import com.google.common.base.Charsets;
import com.google.common.base.Objects;
import com.google.common.collect.Maps;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.bytes.BytesArray;
@ -30,7 +32,7 @@ import org.elasticsearch.common.compress.CompressedStreamInput;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import java.io.IOException;
@ -65,7 +67,7 @@ public class XContentHelper {
public static XContentParser createParser(byte[] data, int offset, int length) throws IOException {
Compressor compressor = CompressorFactory.compressor(data, offset, length);
if (compressor != null) {
CompressedStreamInput compressedInput = compressor.streamInput(new BytesStreamInput(data, offset, length));
CompressedStreamInput compressedInput = compressor.streamInput(StreamInput.wrap(data, offset, length));
XContentType contentType = XContentFactory.xContentType(compressedInput);
compressedInput.resetToBufferStart();
return XContentFactory.xContent(contentType).createParser(compressedInput);
@ -111,7 +113,7 @@ public class XContentHelper {
XContentType contentType;
Compressor compressor = CompressorFactory.compressor(data, offset, length);
if (compressor != null) {
CompressedStreamInput compressedStreamInput = compressor.streamInput(new BytesStreamInput(data, offset, length));
CompressedStreamInput compressedStreamInput = compressor.streamInput(StreamInput.wrap(data, offset, length));
contentType = XContentFactory.xContentType(compressedStreamInput);
compressedStreamInput.resetToBufferStart();
parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput);
@ -260,11 +262,11 @@ public class XContentHelper {
if (modified) {
continue;
}
if (!checkUpdatesAreUnequal || old == null) {
if (!checkUpdatesAreUnequal) {
modified = true;
continue;
}
modified = !old.equals(changesEntry.getValue());
modified = !Objects.equal(old, changesEntry.getValue());
}
return modified;
}
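Switching from `old.equals(...)` to Guava's `Objects.equal(...)` makes the comparison null-safe, so a null existing value is now compared rather than unconditionally treated as a modification; a tiny sketch:

[source,java]
--------------------------------------------------
import com.google.common.base.Objects;

public class NullSafeCompare {
    public static void main(String[] args) {
        Object oldValue = null;
        Object newValue = "replacement";

        // Objects.equal tolerates nulls on either side, unlike oldValue.equals(newValue)
        boolean modified = !Objects.equal(oldValue, newValue);
        System.out.println(modified); // true, and no NullPointerException
    }
}
--------------------------------------------------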

View File

@ -20,7 +20,6 @@
package org.elasticsearch.discovery.local;
import com.google.common.base.Objects;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.block.ClusterBlocks;
@ -32,8 +31,8 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@ -47,8 +46,6 @@ import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static com.google.common.collect.Sets.newHashSet;
import static org.elasticsearch.cluster.ClusterState.Builder;
@ -330,7 +327,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
clusterStateDiffBytes = os.bytes().toBytes();
}
try {
newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(new BytesStreamInput(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState);
newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(StreamInput.wrap(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState);
logger.debug("sending diff cluster state version with size {} to [{}]", clusterStateDiffBytes.length, discovery.localNode.getName());
} catch (IncompatibleClusterStateVersionException ex) {
logger.warn("incompatible cluster state version - resending complete cluster state", ex);

View File

@ -391,7 +391,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent<ZenPing> implem
}
}
if (internal) {
StreamInput input = new BytesStreamInput(new BytesArray(data.toBytes(), INTERNAL_HEADER.length, data.length() - INTERNAL_HEADER.length));
StreamInput input = StreamInput.wrap(new BytesArray(data.toBytes(), INTERNAL_HEADER.length, data.length() - INTERNAL_HEADER.length));
Version version = Version.readVersion(input);
input.setVersion(version);
id = input.readInt();

View File

@ -23,8 +23,10 @@ import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.predicates.ObjectPredicate;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -45,14 +47,13 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
import org.elasticsearch.transport.ConnectTransportException;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
/**
@ -101,6 +102,15 @@ public class GatewayAllocator extends AbstractComponent {
}
}
/**
* Return {@code true} if the index is configured to allow shards to be
* recovered on any node
*/
private boolean recoverOnAnyNode(@IndexSettings Settings idxSettings) {
return IndexMetaData.isOnSharedFilesystem(idxSettings) &&
idxSettings.getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false);
}
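For illustration, a hedged sketch of reading that flag the same way the helper above does; the settings builder is illustrative and only the `SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE` constant comes from this hunk.

[source,java]
--------------------------------------------------
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class RecoverOnAnyNodeCheck {
    public static void main(String[] args) {
        Settings idxSettings = ImmutableSettings.settingsBuilder()
                .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true)
                .build();

        // same read the allocator performs (the shared-filesystem check is elided here)
        boolean allowAnyNode = idxSettings.getAsBoolean(
                IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false);
        System.out.println(allowAnyNode); // true
    }
}
--------------------------------------------------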
public boolean allocateUnassigned(RoutingAllocation allocation) {
boolean changed = false;
DiscoveryNodes nodes = allocation.nodes();
@ -125,11 +135,13 @@ public class GatewayAllocator extends AbstractComponent {
int numberOfAllocationsFound = 0;
long highestVersion = -1;
Set<DiscoveryNode> nodesWithHighestVersion = Sets.newHashSet();
final Map<DiscoveryNode, Long> nodesWithVersion = Maps.newHashMap();
assert !nodesState.containsKey(null);
final Object[] keys = nodesState.keys;
final long[] values = nodesState.values;
IndexMetaData indexMetaData = routingNodes.metaData().index(shard.index());
Settings idxSettings = indexMetaData.settings();
for (int i = 0; i < keys.length; i++) {
if (keys[i] == null) {
continue;
@ -141,29 +153,63 @@ public class GatewayAllocator extends AbstractComponent {
if (allocation.shouldIgnoreShardForNode(shard.shardId(), node.id())) {
continue;
}
if (version != -1) {
if (recoverOnAnyNode(idxSettings)) {
numberOfAllocationsFound++;
if (highestVersion == -1) {
nodesWithHighestVersion.add(node);
if (version > highestVersion) {
highestVersion = version;
} else {
if (version > highestVersion) {
nodesWithHighestVersion.clear();
nodesWithHighestVersion.add(node);
highestVersion = version;
} else if (version == highestVersion) {
nodesWithHighestVersion.add(node);
}
}
// We always put the node without clearing the map
nodesWithVersion.put(node, version);
} else if (version != -1) {
numberOfAllocationsFound++;
// If we've found a new "best" candidate, clear the
// current candidates and add it
if (version > highestVersion) {
highestVersion = version;
nodesWithVersion.clear();
nodesWithVersion.put(node, version);
} else if (version == highestVersion) {
// If the candidate is the same, add it to the
// list, but keep the current candidate
nodesWithVersion.put(node, version);
}
}
}
// Now that we have a map of nodes to versions along with the
// number of allocations found (and not ignored), we need to sort
// it so the node with the highest version is at the beginning
List<DiscoveryNode> nodesWithHighestVersion = Lists.newArrayList();
nodesWithHighestVersion.addAll(nodesWithVersion.keySet());
CollectionUtil.timSort(nodesWithHighestVersion, new Comparator<DiscoveryNode>() {
@Override
public int compare(DiscoveryNode o1, DiscoveryNode o2) {
return Long.compare(nodesWithVersion.get(o2), nodesWithVersion.get(o1));
}
});
if (logger.isDebugEnabled()) {
logger.debug("[{}][{}] found {} allocations of {}, highest version: [{}]",
shard.index(), shard.id(), numberOfAllocationsFound, shard, highestVersion);
}
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("[");
for (DiscoveryNode n : nodesWithHighestVersion) {
sb.append("[");
sb.append(n.getName());
sb.append("]");
sb.append(" -> ");
sb.append(nodesWithVersion.get(n));
sb.append(", ");
}
sb.append("]");
logger.trace("{} candidates for allocation: {}", shard, sb.toString());
}
// check if the counts meets the minimum set
int requiredAllocation = 1;
// if we restore from a repository one copy is more than enough
if (shard.restoreSource() == null) {
try {
IndexMetaData indexMetaData = routingNodes.metaData().index(shard.index());
String initialShards = indexMetaData.settings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards));
if ("quorum".equals(initialShards)) {
if (indexMetaData.numberOfReplicas() > 1) {
@ -426,13 +472,6 @@ public class GatewayAllocator extends AbstractComponent {
for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : response) {
long version = nodeShardState.version();
Settings idxSettings = indexMetaData.settings();
if (IndexMetaData.isOnSharedFilesystem(idxSettings) &&
idxSettings.getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false)) {
// Shared filesystems use 0 as a minimum shard state, which
// means that the shard can be allocated to any node
version = Math.max(0, version);
}
// -1 version means it does not exist, which is what the API returns, and what we expect to
logger.trace("[{}] on node [{}] has version [{}] of shard",
shard, nodeShardState.getNode(), version);
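The highest-version bookkeeping now keeps a node-to-version map and sorts the candidates descending; a standalone sketch of the same Lucene `CollectionUtil.timSort` pattern with made-up node names:

[source,java]
--------------------------------------------------
import org.apache.lucene.util.CollectionUtil;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SortByVersion {
    public static void main(String[] args) {
        final Map<String, Long> nodesWithVersion = new HashMap<>();
        nodesWithVersion.put("node-a", 3L);
        nodesWithVersion.put("node-b", 7L);
        nodesWithVersion.put("node-c", 7L);

        List<String> candidates = new ArrayList<>(nodesWithVersion.keySet());
        // highest shard version first, mirroring the comparator added above
        CollectionUtil.timSort(candidates, new Comparator<String>() {
            @Override
            public int compare(String o1, String o2) {
                return Long.compare(nodesWithVersion.get(o2), nodesWithVersion.get(o1));
            }
        });
        System.out.println(candidates); // node-b and node-c (version 7) before node-a (version 3)
    }
}
--------------------------------------------------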

View File

@ -63,7 +63,9 @@ public final class FieldNameAnalyzer extends DelegatingAnalyzerWrapper {
if (analyzer != null) {
return analyzer;
}
return defaultAnalyzer;
// Don't be lenient here and return the default analyzer
// Fields need to be explicitly added
throw new IllegalArgumentException("Field [" + name + "] has no associated analyzer");
}
/**
@ -72,9 +74,11 @@ public final class FieldNameAnalyzer extends DelegatingAnalyzerWrapper {
public FieldNameAnalyzer copyAndAddAll(Collection<? extends Map.Entry<String, Analyzer>> mappers) {
CopyOnWriteHashMap<String, Analyzer> analyzers = this.analyzers;
for (Map.Entry<String, Analyzer> entry : mappers) {
if (entry.getValue() != null) {
analyzers = analyzers.copyAndPut(entry.getKey(), entry.getValue());
Analyzer analyzer = entry.getValue();
if (analyzer == null) {
analyzer = defaultAnalyzer;
}
analyzers = analyzers.copyAndPut(entry.getKey(), analyzer);
}
return new FieldNameAnalyzer(analyzers, defaultAnalyzer);
}

View File

@ -1,89 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.id;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import java.io.IOException;
/**
* @deprecated Id cache has been removed in favor for {@link org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData}
* this class now reports field data memory usage for _parent field.
*/
@Deprecated
public class IdCacheStats implements Streamable, ToXContent {
long memorySize;
public IdCacheStats() {
}
public IdCacheStats(long memorySize) {
this.memorySize = memorySize;
}
public void add(IdCacheStats stats) {
this.memorySize += stats.memorySize;
}
public long getMemorySizeInBytes() {
return this.memorySize;
}
public ByteSizeValue getMemorySize() {
return new ByteSizeValue(memorySize);
}
public static IdCacheStats readIdCacheStats(StreamInput in) throws IOException {
IdCacheStats stats = new IdCacheStats();
stats.readFrom(in);
return stats;
}
@Override
public void readFrom(StreamInput in) throws IOException {
memorySize = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(memorySize);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.ID_CACHE);
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
builder.endObject();
return builder;
}
static final class Fields {
static final XContentBuilderString ID_CACHE = new XContentBuilderString("id_cache");
static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
}
}

View File

@ -558,31 +558,10 @@ public abstract class Engine implements Closeable {
return t;
}
public static interface FailedEngineListener {
public interface FailedEngineListener {
void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t);
}
/**
* Recovery allows starting the recovery process. It is built of three phases.
* <p/>
* <p>The first phase allows taking a snapshot of the master index. Once this
* is taken, no commit operations are effectively allowed on the index until the recovery
* phases are through.
* <p/>
* <p>The second phase takes a snapshot of the current transaction log.
* <p/>
* <p>The last phase returns the remaining transaction log. During this phase, no dirty
* operations are allowed on the index.
*/
public static interface RecoveryHandler {
void phase1(SnapshotIndexCommit snapshot);
void phase2(Translog.Snapshot snapshot);
void phase3(Translog.Snapshot snapshot);
}
public static class Searcher implements Releasable {
private final String source;

View File

@ -29,7 +29,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
@ -40,10 +39,10 @@ import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.threadpool.ThreadPool;
import java.nio.file.Path;
import java.util.concurrent.TimeUnit;
/*
@ -76,11 +75,9 @@ public final class EngineConfig {
private final Similarity similarity;
private final CodecService codecService;
private final Engine.FailedEngineListener failedEngineListener;
private final boolean ignoreUnknownTranslog;
private final boolean forceNewTranslog;
private final QueryCache filterCache;
private final QueryCachingPolicy filterCachingPolicy;
private final BigArrays bigArrays;
private final Path translogPath;
/**
* Index setting for index concurrency / number of threadstates in the indexwriter.
@ -126,7 +123,7 @@ public final class EngineConfig {
/** if set to true the engine will start even if the translog id in the commit point can not be found */
public static final String INDEX_IGNORE_UNKNOWN_TRANSLOG = "index.engine.ignore_unknown_translog";
public static final String INDEX_FORCE_NEW_TRANSLOG = "index.engine.force_new_translog";
public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
@ -137,6 +134,7 @@ public final class EngineConfig {
public static final String DEFAULT_VERSION_MAP_SIZE = "25%";
private static final String DEFAULT_CODEC_NAME = "default";
private TranslogConfig translogConfig;
/**
@ -146,7 +144,7 @@ public final class EngineConfig {
IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer,
Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener,
TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy, BigArrays bigArrays, Path translogPath) {
TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy, TranslogConfig translogConfig) {
this.shardId = shardId;
this.threadPool = threadPool;
this.indexingService = indexingService;
@ -160,8 +158,6 @@ public final class EngineConfig {
this.similarity = similarity;
this.codecService = codecService;
this.failedEngineListener = failedEngineListener;
this.bigArrays = bigArrays;
this.translogPath = translogPath;
Settings indexSettings = indexSettingsService.getSettings();
this.optimizeAutoGenerateId = indexSettings.getAsBoolean(EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING, false);
this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
@ -172,9 +168,10 @@ public final class EngineConfig {
versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE);
updateVersionMapSize();
this.translogRecoveryPerformer = translogRecoveryPerformer;
this.ignoreUnknownTranslog = indexSettings.getAsBoolean(INDEX_IGNORE_UNKNOWN_TRANSLOG, false);
this.forceNewTranslog = indexSettings.getAsBoolean(INDEX_FORCE_NEW_TRANSLOG, false);
this.filterCache = filterCache;
this.filterCachingPolicy = filterCachingPolicy;
this.translogConfig = translogConfig;
}
/** updates {@link #versionMapSize} based on current setting and {@link #indexingBufferSize} */
@ -203,8 +200,8 @@ public final class EngineConfig {
}
/** if true the engine will start even if the translog id in the commit point can not be found */
public boolean getIgnoreUnknownTranslog() {
return ignoreUnknownTranslog;
public boolean forceNewTranslog() {
return forceNewTranslog;
}
/**
@ -429,23 +426,14 @@ public final class EngineConfig {
}
/**
* Returns a BigArrays instance for this engine
* Returns the translog config for this engine
*/
public BigArrays getBigArrays() {
return bigArrays;
public TranslogConfig getTranslogConfig() {
return translogConfig;
}
/**
* Returns the translog path for this engine
*/
public Path getTranslogPath() {
return translogPath;
}
/**
* Returns the {@link org.elasticsearch.index.settings.IndexSettingsService} for this engine.
*/
public IndexSettingsService getIndesSettingService() {
IndexSettingsService getIndexSettingsService() { // for testing
return indexSettingsService;
}
}

View File

@ -21,19 +21,8 @@ package org.elasticsearch.index.engine;
import com.google.common.collect.Lists;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.LiveIndexWriterConfig;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
@ -49,6 +38,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.routing.DjbHashFunction;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.LoggerInfoStream;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
@ -64,13 +54,15 @@ import org.elasticsearch.index.merge.policy.ElasticsearchMergePolicy;
import org.elasticsearch.index.merge.policy.MergePolicyProvider;
import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
import org.elasticsearch.index.search.nested.IncludeNestedDocsQuery;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.index.translog.TranslogCorruptedException;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
@ -82,7 +74,6 @@ import java.util.concurrent.locks.ReentrantLock;
*
*/
public class InternalEngine extends Engine {
private final FailEngineOnMergeFailure mergeSchedulerFailureListener;
private final MergeSchedulerListener mergeSchedulerListener;
@ -141,14 +132,18 @@ public class InternalEngine extends Engine {
}
throttle = new IndexThrottle();
this.searcherFactory = new SearchFactory(engineConfig);
final Long committedTranslogId;
this.searcherFactory = new SearchFactory(logger, isClosed, engineConfig);
final Translog.TranslogGeneration translogGeneration;
try {
writer = createWriter();
// TODO: would be better if ES could tell us "from above" whether this shard was already here, instead of using Lucene's API
// (which relies on IO ops, directory listing, and has had scary bugs in the past):
boolean create = !Lucene.indexExists(store.directory());
writer = createWriter(create);
indexWriter = writer;
translog = new Translog(engineConfig.getShardId(), engineConfig.getIndesSettingService(), engineConfig.getBigArrays(), engineConfig.getTranslogPath(), engineConfig.getThreadPool());
committedTranslogId = loadCommittedTranslogId(writer, translog);
} catch (IOException e) {
translog = openTranslog(engineConfig, writer, create || skipInitialTranslogRecovery || engineConfig.forceNewTranslog());
translogGeneration = translog.getGeneration();
assert translogGeneration != null;
} catch (IOException | TranslogCorruptedException e) {
throw new EngineCreationFailureException(shardId, "failed to create engine", e);
}
this.translog = translog;
@ -162,9 +157,9 @@ public class InternalEngine extends Engine {
try {
if (skipInitialTranslogRecovery) {
// make sure we point at the latest translog from now on..
commitIndexWriter(writer, translog.currentId(), lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID));
commitIndexWriter(writer, translog, lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID));
} else {
recoverFromTranslog(engineConfig, committedTranslogId);
recoverFromTranslog(engineConfig, translogGeneration);
}
} catch (IOException | EngineException ex) {
throw new EngineCreationFailureException(shardId, "failed to recover from translog", ex);
@ -183,26 +178,44 @@ public class InternalEngine extends Engine {
logger.trace("created new InternalEngine");
}
private Translog openTranslog(EngineConfig engineConfig, IndexWriter writer, boolean createNew) throws IOException {
final Translog.TranslogGeneration generation = loadTranslogIdFromCommit(writer);
final TranslogConfig translogConfig = engineConfig.getTranslogConfig();
if (createNew == false) {
// We expect that this shard already exists, so it must already have an existing translog else something is badly wrong!
if (generation == null) {
throw new IllegalStateException("no translog generation present in commit data but translog is expected to exist");
}
translogConfig.setTranslogGeneration(generation);
if (generation != null && generation.translogUUID == null) {
// only upgrade on pre-2.0 indices...
Translog.upgradeLegacyTranslog(logger, translogConfig);
}
}
final Translog translog = new Translog(translogConfig);
if (generation == null) {
logger.debug("no translog ID present in the current generation - creating one");
boolean success = false;
try {
commitIndexWriter(writer, translog);
success = true;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(translog);
}
}
}
return translog;
}
@Override
public Translog getTranslog() {
ensureOpen();
return translog;
}
protected void recoverFromTranslog(EngineConfig engineConfig, Long committedTranslogId) throws IOException {
if (committedTranslogId != null) {
try {
// trim unneeded files
translog.markCommitted(committedTranslogId);
} catch (FileNotFoundException ex) {
if (engineConfig.getIgnoreUnknownTranslog()) {
logger.warn("ignoring committed translog id [{}] ([{}] set to true)", committedTranslogId,
EngineConfig.INDEX_IGNORE_UNKNOWN_TRANSLOG);
} else {
throw ex;
}
}
}
protected void recoverFromTranslog(EngineConfig engineConfig, Translog.TranslogGeneration translogGeneration) throws IOException {
int opsRecovered = 0;
final TranslogRecoveryPerformer handler = engineConfig.getTranslogRecoveryPerformer();
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
@ -223,14 +236,15 @@ public class InternalEngine extends Engine {
} catch (Throwable e) {
throw new EngineException(shardId, "failed to recover from translog", e);
}
// flush if we recovered something or if we have references to older translogs
// note: if opsRecovered == 0 and we have older translogs it means they are corrupted or 0 length.
if (opsRecovered > 0) {
logger.trace("flushing post recovery from translog. ops recovered [{}]. committed translog id [{}]. current id [{}]",
opsRecovered, committedTranslogId, translog.currentId());
opsRecovered, translogGeneration == null ? null : translogGeneration.translogFileGeneration, translog.currentFileGeneration());
flush(true, true);
} else if (committedTranslogId != null && translog.currentId() != committedTranslogId){
commitIndexWriter(indexWriter, translog.currentId(), lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID));
} else if (translog.isCurrent(translogGeneration) == false){
commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID));
}
}
@ -239,15 +253,21 @@ public class InternalEngine extends Engine {
* translog id into lucene and returns null.
*/
@Nullable
private Long loadCommittedTranslogId(IndexWriter writer, Translog translog) throws IOException {
private Translog.TranslogGeneration loadTranslogIdFromCommit(IndexWriter writer) throws IOException {
// commit on a just opened writer will commit even if there are no changes done to it
// we rely on that for the commit data translog id key
final Map<String, String> commitUserData = writer.getCommitData();
if (commitUserData.containsKey(Translog.TRANSLOG_ID_KEY)) {
return Long.parseLong(commitUserData.get(Translog.TRANSLOG_ID_KEY));
if (commitUserData.containsKey("translog_id")) {
assert commitUserData.containsKey(Translog.TRANSLOG_UUID_KEY) == false : "legacy commit contains translog UUID";
return new Translog.TranslogGeneration(null, Long.parseLong(commitUserData.get("translog_id")));
} else if (commitUserData.containsKey(Translog.TRANSLOG_GENERATION_KEY)) {
if (commitUserData.containsKey(Translog.TRANSLOG_UUID_KEY) == false) {
throw new IllegalStateException("commit doesn't contain translog UUID");
}
final String translogUUID = commitUserData.get(Translog.TRANSLOG_UUID_KEY);
final long translogGen = Long.parseLong(commitUserData.get(Translog.TRANSLOG_GENERATION_KEY));
return new Translog.TranslogGeneration(translogUUID, translogGen);
}
logger.debug("no translog ID present in the current commit - creating one");
commitIndexWriter(writer, translog.currentId());
return null;
}
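`loadTranslogIdFromCommit` reads the generation and UUID back from the Lucene commit user data; a hedged sketch of how such a pair could be written with plain Lucene API (illustrative only, not the engine's actual `commitIndexWriter` path):

[source,java]
--------------------------------------------------
import org.apache.lucene.index.IndexWriter;
import org.elasticsearch.index.translog.Translog;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class TranslogCommitData {
    // sketch only: stores the generation + UUID pair that loadTranslogIdFromCommit() reads back
    public static void storeTranslogGeneration(IndexWriter writer, String translogUUID, long generation) throws IOException {
        Map<String, String> commitData = new HashMap<>();
        commitData.put(Translog.TRANSLOG_GENERATION_KEY, Long.toString(generation));
        commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID);
        writer.setCommitData(commitData);
        writer.commit();
    }
}
--------------------------------------------------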
@ -687,8 +707,7 @@ public class InternalEngine extends Engine {
return SyncedFlushResult.COMMIT_MISMATCH;
}
logger.trace("starting sync commit [{}]", syncId);
final long translogId = translog.currentId();
commitIndexWriter(indexWriter, translogId, syncId);
commitIndexWriter(indexWriter, translog, syncId);
logger.debug("successfully sync committed. sync id [{}].", syncId);
lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
return SyncedFlushResult.SUCCESS;
@ -700,15 +719,11 @@ public class InternalEngine extends Engine {
@Override
public CommitId flush() throws EngineException {
return flush(true, false, false);
return flush(false, false);
}
@Override
public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
return flush(true, force, waitIfOngoing);
}
private CommitId flush(boolean commitTranslog, boolean force, boolean waitIfOngoing) throws EngineException {
ensureOpen();
final byte[] newCommitId;
/*
@ -733,38 +748,21 @@ public class InternalEngine extends Engine {
logger.trace("acquired flush lock immediately");
}
try {
if (commitTranslog) {
if (flushNeeded || force) {
flushNeeded = false;
final long translogId;
try {
translogId = translog.newTranslog();
logger.trace("starting commit for flush; commitTranslog=true");
commitIndexWriter(indexWriter, translogId);
logger.trace("finished commit for flush");
// we need to refresh in order to clear older version values
refresh("version_table_flush");
translog.markCommitted(translogId);
} catch (Throwable e) {
throw new FlushFailedEngineException(shardId, e);
}
}
} else {
// note, its ok to just commit without cleaning the translog, its perfectly fine to replay a
// translog on an index that was opened on a committed point in time that is "in the future"
// of that translog
// we allow to *just* commit if there is an ongoing recovery happening...
// its ok to use this, only a flush will cause a new translogId, and we are locked here from
// other flushes use flushLock
if (flushNeeded || force) {
flushNeeded = false;
final long translogId;
try {
logger.trace("starting commit for flush; commitTranslog=false");
commitIndexWriter(indexWriter, translog.currentId());
translog.prepareCommit();
logger.trace("starting commit for flush; commitTranslog=true");
commitIndexWriter(indexWriter, translog);
logger.trace("finished commit for flush");
translog.commit();
// we need to refresh in order to clear older version values
refresh("version_table_flush");
} catch (Throwable e) {
throw new FlushFailedEngineException(shardId, e);
}
}
/*
* we have to inc-ref the store here since if the engine is closed by a tragic event
@ -860,7 +858,7 @@ public class InternalEngine extends Engine {
indexWriter.forceMerge(maxNumSegments, true /* blocks and waits for merges*/);
}
if (flush) {
flush(true, true, true);
flush(true, true);
}
if (upgrade) {
logger.info("finished segment upgrade");
@@ -887,7 +885,7 @@ public class InternalEngine extends Engine {
// the readlock to a write lock when we fail the engine in this operation
if (flushFirst) {
logger.trace("start flush for snapshot");
flush(false, false, true);
flush(false, true);
logger.trace("finish flush for snapshot");
}
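// Editor's note (not part of this diff): with commitTranslog gone, the remaining flags read as
// flush(force, waitIfOngoing). The snapshot path above uses flush(false, true): do not force a
// commit when nothing has changed, but do wait for any concurrent flush to finish before the
// snapshot is taken instead of failing it.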
try (ReleasableLock lock = readLock.acquire()) {
@@ -1023,9 +1021,8 @@ public class InternalEngine extends Engine {
}
}
private IndexWriter createWriter() throws IOException {
private IndexWriter createWriter(boolean create) throws IOException {
try {
boolean create = !Lucene.indexExists(store.directory());
final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
iwc.setCommitOnClose(false); // we by default don't commit on close
iwc.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
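// Editor's note (assumption, not part of this diff): with the boolean parameter the caller now
// decides between creating a fresh index and appending to an existing one, instead of probing
// the directory here. A hypothetical call site might look like
//
//   indexWriter = createWriter(/* create = */ startingWithNoIndex);
//
// where 'startingWithNoIndex' stands for whatever signal the engine start-up path already has.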
@@ -1085,14 +1082,19 @@ public class InternalEngine extends Engine {
throw ex;
}
}
/** Extended SearcherFactory that warms the segments if needed when acquiring a new searcher */
final static class SearchFactory extends EngineSearcherFactory {
private final IndicesWarmer warmer;
private final ShardId shardId;
private final ESLogger logger;
private final AtomicBoolean isEngineClosed;
/**
* Extended SearcherFactory that warms the segments if needed when acquiring a new searcher
*/
class SearchFactory extends EngineSearcherFactory {
SearchFactory(EngineConfig engineConfig) {
SearchFactory(ESLogger logger, AtomicBoolean isEngineClosed, EngineConfig engineConfig) {
super(engineConfig);
warmer = engineConfig.getWarmer();
shardId = engineConfig.getShardId();
this.logger = logger;
this.isEngineClosed = isEngineClosed;
}
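// Editor's note (assumed call site, not part of this diff): the factory no longer reaches into
// the engine for its logger and closed flag; they are passed in explicitly, e.g.
//
//   new SearchFactory(logger, isClosed, engineConfig)
//
// so warming failures that happen after the engine was closed can be ignored quietly.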
@Override
@@ -1104,36 +1106,34 @@ public class InternalEngine extends Engine {
IndexSearcher newSearcher = null;
boolean closeNewSearcher = false;
try {
if (searcherManager == null) {
if (previousReader == null) {
// we are starting up - no writer active so we can't acquire a searcher.
newSearcher = searcher;
} else {
try (final Searcher currentSearcher = acquireSearcher("search_factory")) {
// figure out the newSearcher, with only the new readers that are relevant for us
List<IndexReader> readers = Lists.newArrayList();
for (LeafReaderContext newReaderContext : searcher.getIndexReader().leaves()) {
if (isMergedSegment(newReaderContext.reader())) {
// merged segments are already handled by IndexWriterConfig.setMergedSegmentWarmer
continue;
}
boolean found = false;
for (LeafReaderContext currentReaderContext : currentSearcher.reader().leaves()) {
if (currentReaderContext.reader().getCoreCacheKey().equals(newReaderContext.reader().getCoreCacheKey())) {
found = true;
break;
}
}
if (!found) {
readers.add(newReaderContext.reader());
// figure out the newSearcher, with only the new readers that are relevant for us
List<IndexReader> readers = Lists.newArrayList();
for (LeafReaderContext newReaderContext : reader.leaves()) {
if (isMergedSegment(newReaderContext.reader())) {
// merged segments are already handled by IndexWriterConfig.setMergedSegmentWarmer
continue;
}
boolean found = false;
for (LeafReaderContext currentReaderContext : previousReader.leaves()) {
if (currentReaderContext.reader().getCoreCacheKey().equals(newReaderContext.reader().getCoreCacheKey())) {
found = true;
break;
}
}
if (!readers.isEmpty()) {
// we don't want to close the inner readers, just increase ref on them
IndexReader newReader = new MultiReader(readers.toArray(new IndexReader[readers.size()]), false);
newSearcher = super.newSearcher(newReader, null);
closeNewSearcher = true;
if (!found) {
readers.add(newReaderContext.reader());
}
}
if (!readers.isEmpty()) {
// we don't want to close the inner readers, just increase ref on them
IndexReader newReader = new MultiReader(readers.toArray(new IndexReader[readers.size()]), false);
newSearcher = super.newSearcher(newReader, null);
closeNewSearcher = true;
}
}
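// Editor's note (not part of this diff): two leaf readers report the same core cache key exactly
// when they wrap the same underlying segment, so the loop above keeps only the segments that were
// absent from the previous reader; those are wrapped in a MultiReader (closeSubReaders=false, so
// the shared segments are only ref-counted, never closed here) purely for warming.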
if (newSearcher != null) {
@@ -1142,7 +1142,7 @@ public class InternalEngine extends Engine {
}
warmer.warmTopReader(new IndicesWarmer.WarmerContext(shardId, new Searcher("warmer", searcher)));
} catch (Throwable e) {
if (isClosed.get() == false) {
if (isEngineClosed.get() == false) {
logger.warn("failed to prepare/warm", e);
}
} finally {
@@ -1213,11 +1213,13 @@ public class InternalEngine extends Engine {
}
}
private void commitIndexWriter(IndexWriter writer, long translogId, String syncId) throws IOException {
private void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException {
try {
logger.trace("committing writer with translog id [{}] and sync id [{}]", translogId, syncId);
Map<String, String> commitData = new HashMap<>();
commitData.put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId));
Translog.TranslogGeneration translogGeneration = translog.getGeneration();
logger.trace("committing writer with translog id [{}] and sync id [{}] ", translogGeneration.translogFileGeneration, syncId);
Map<String, String> commitData = new HashMap<>(2);
commitData.put(Translog.TRANSLOG_GENERATION_KEY, Long.toString(translogGeneration.translogFileGeneration));
commitData.put(Translog.TRANSLOG_UUID_KEY, translogGeneration.translogUUID);
if (syncId != null) {
commitData.put(Engine.SYNC_COMMIT_ID, syncId);
}
@@ -1229,7 +1231,7 @@ public class InternalEngine extends Engine {
}
}
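// Editor's illustrative sketch (not part of this diff; values invented): after a synced flush the
// Lucene commit user data ends up roughly as
//
//   { Translog.TRANSLOG_GENERATION_KEY -> "7",
//     Translog.TRANSLOG_UUID_KEY       -> "<uuid of the live translog>",
//     Engine.SYNC_COMMIT_ID            -> syncId }      // sync id only present for synced flushes
//
// and the two translog keys are exactly what the generation-loading code at the top of this diff
// reads back when the engine is reopened.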
private void commitIndexWriter(IndexWriter writer, long translogId) throws IOException {
commitIndexWriter(writer, translogId, null);
private void commitIndexWriter(IndexWriter writer, Translog translog) throws IOException {
commitIndexWriter(writer, translog, null);
}
}

View File

@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.engine;
public class InternalEngineFactory implements EngineFactory {

Some files were not shown because too many files have changed in this diff.