Merge branch 'master' into enhancement/remove_node_client_setting

javanna 2016-03-29 14:27:04 +02:00 committed by Luca Cavanna
commit 8fc9dbbb99
21 changed files with 121 additions and 109 deletions

View File

@@ -19,6 +19,7 @@
 package org.elasticsearch.index.query;
+import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
@@ -34,7 +35,11 @@ public class TypeQueryBuilderTests extends AbstractQueryTestCase<TypeQueryBuilde
     @Override
     protected void doAssertLuceneQuery(TypeQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
-        assertEquals(new TypeFieldMapper.TypeQuery(new BytesRef(queryBuilder.type())), query);
+        if (queryShardContext().getMapperService().documentMapper(queryBuilder.type()) == null) {
+            assertEquals(new MatchNoDocsQuery(), query);
+        } else {
+            assertEquals(new TypeFieldMapper.TypeQuery(new BytesRef(queryBuilder.type())), query);
+        }
     }
     public void testIllegalArgument() {

View File

@@ -45,6 +45,8 @@ include::cluster/nodes-stats.asciidoc[]
 include::cluster/nodes-info.asciidoc[]
+include::cluster/nodes-task.asciidoc[]
 include::cluster/nodes-hot-threads.asciidoc[]
 include::cluster/allocation-explain.asciidoc[]

View File

@@ -0,0 +1,49 @@
+[[nodes-task]]
+== Nodes Task API
+The nodes task management API retrieves information about the tasks currently
+executing on one or more nodes in the cluster.
+[source,js]
+--------------------------------------------------
+GET /_tasks <1>
+GET /_tasks/nodeId1,nodeId2 <2>
+GET /_tasks/nodeId1,nodeId2/cluster:* <3>
+--------------------------------------------------
+// AUTOSENSE
+<1> Retrieves all tasks currently running on all nodes in the cluster.
+<2> Retrieves all tasks running on nodes `nodeId1` and `nodeId2`. See <<cluster-nodes>> for more info about how to select individual nodes.
+<3> Retrieves all cluster-related tasks running on nodes `nodeId1` and `nodeId2`.
+The result will look similar to the following:
+[source,js]
+--------------------------------------------------
+{
+  "nodes": {
+    "fDlEl7PrQi6F-awHZ3aaDw": {
+      "name": "Gazer",
+      "transport_address": "127.0.0.1:9300",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1:9300",
+      "tasks": [
+        {
+          "node": "fDlEl7PrQi6F-awHZ3aaDw",
+          "id": 105,
+          "type": "transport",
+          "action": "cluster:monitor/nodes/tasks"
+        },
+        {
+          "node": "fDlEl7PrQi6F-awHZ3aaDw",
+          "id": 106,
+          "type": "direct",
+          "action": "cluster:monitor/nodes/tasks[n]",
+          "parent_node": "fDlEl7PrQi6F-awHZ3aaDw",
+          "parent_id": 105
+        }
+      ]
+    }
+  }
+}
+--------------------------------------------------

View File

@@ -1,8 +1,8 @@
 [[docs-reindex]]
 == Reindex API
-`_reindex`'s most basic form just copies documents from one index to another.
-This will copy documents from `twitter` into `new_twitter`:
+The most basic form of `_reindex` just copies documents from one index to another.
+This will copy documents from the `twitter` index into the `new_twitter` index:
 [source,js]
 --------------------------------------------------
@@ -32,12 +32,13 @@ That will return something like this:
 }
 --------------------------------------------------
-Just like `_update_by_query`, `_reindex` gets a snapshot of the source index
-but its target must be a **different** index so version conflicts are unlikely.
-The `dest` element can be configured like the index API to control optimistic
-concurrency control. Just leaving out `version_type` (as above) or setting it
-to `internal` will cause Elasticsearch to blindly dump documents into the
-target, overwriting any that happen to have the same type and id:
+Just like <<docs-update-by-query,`_update_by_query`>>, `_reindex` gets a
+snapshot of the source index but its target must be a **different** index so
+version conflicts are unlikely. The `dest` element can be configured like the
+index API to control optimistic concurrency control. Just leaving out
+`version_type` (as above) or setting it to `internal` will cause Elasticsearch
+to blindly dump documents into the target, overwriting any that happen to have
+the same type and id:
 [source,js]
 --------------------------------------------------
@@ -113,7 +114,7 @@ POST /_reindex
 // AUTOSENSE
 You can limit the documents by adding a type to the `source` or by adding a
-query. This will only copy `tweet`s made by `kimchy` into `new_twitter`:
+query. This will only copy ++tweet++'s made by `kimchy` into `new_twitter`:
 [source,js]
 --------------------------------------------------
@@ -140,9 +141,9 @@ lots of sources in one request. This will copy documents from the `tweet` and
 `post` types in the `twitter` and `blog` index. It'd include the `post` type in
 the `twitter` index and the `tweet` type in the `blog` index. If you want to be
 more specific you'll need to use the `query`. It also makes no effort to handle
-id collisions. The target index will remain valid but it's not easy to predict
+ID collisions. The target index will remain valid but it's not easy to predict
 which document will survive because the iteration order isn't well defined.
-Just avoid that situation, ok?
 [source,js]
 --------------------------------------------------
 POST /_reindex
@@ -222,14 +223,15 @@ POST /_reindex
 Think of the possibilities! Just be careful! With great power.... You can
 change:
-* "_id"
-* "_type"
-* "_index"
-* "_version"
-* "_routing"
-* "_parent"
-* "_timestamp"
-* "_ttl"
+* `_id`
+* `_type`
+* `_index`
+* `_version`
+* `_routing`
+* `_parent`
+* `_timestamp`
+* `_ttl`
 Setting `_version` to `null` or clearing it from the `ctx` map is just like not
 sending the version in an indexing request. It will cause that document to be
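For illustration only, a minimal sketch of a reindex whose script touches the metadata listed above; the `inline` script field and the Groovy-style expression are assumptions based on the documentation of this era, and the index names reuse the `twitter`/`new_twitter` example from earlier on this page:

[source,js]
--------------------------------------------------
POST /_reindex
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter"
  },
  "script": {
    "inline": "ctx._version = null"
  }
}
--------------------------------------------------

Clearing `ctx._version` like this indexes the copied documents as though no version had been sent, as described in the paragraph above.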
@@ -257,6 +259,7 @@ the `=`.
 For example, you can use the following request to copy all documents from
 the `source` index with the company name `cat` into the `dest` index with
 routing set to `cat`.
 [source,js]
 --------------------------------------------------
 POST /_reindex
@@ -316,7 +319,7 @@ Elasticsearch log file. This will be fixed soon.
 `consistency` controls how many copies of a shard must respond to each write
 request. `timeout` controls how long each write request waits for unavailable
 shards to become available. Both work exactly how they work in the
-{ref}/docs-bulk.html[Bulk API].
+<<docs-bulk,Bulk API>>.
 `requests_per_second` can be set to any decimal number (1.4, 6, 1000, etc) and
 throttle the number of requests per second that the reindex issues. The
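A hypothetical throttled reindex, assuming `requests_per_second`, `consistency`, and `timeout` are passed as query-string parameters (the parameter names come from the surrounding prose; the values are purely illustrative):

[source,js]
--------------------------------------------------
POST /_reindex?requests_per_second=100&consistency=one&timeout=2m
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------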
@@ -385,7 +388,7 @@ from aborting the operation.
 === Works with the Task API
 While Reindex is running you can fetch their status using the
-{ref}/task/list.html[Task List APIs]:
+<<nodes-task,Nodes Task API>>:
 [source,js]
 --------------------------------------------------

View File

@@ -56,7 +56,7 @@ POST /twitter/tweet/_update_by_query?conflicts=proceed
 // AUTOSENSE
 You can also limit `_update_by_query` using the
-{ref}/query-dsl.html[Query DSL]. This will update all documents from the
+<<query-dsl,Query DSL>>. This will update all documents from the
 `twitter` index for the user `kimchy`:
 [source,js]
@@ -73,7 +73,7 @@ POST /twitter/_update_by_query?conflicts=proceed
 // AUTOSENSE
 <1> The query must be passed as a value to the `query` key, in the same
-way as the {ref}/search-search.html[Search API]. You can also use the `q`
+way as the <<search-search,Search API>>. You can also use the `q`
 parameter in the same way as the search api.
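For example, the same `kimchy` update can be written as a hypothetical one-liner with the `q` parameter, using Lucene query-string syntax as in the search API:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?conflicts=proceed&q=user:kimchy
--------------------------------------------------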
 So far we've only been updating documents without changing their source. That
@@ -81,6 +81,7 @@ is genuinely useful for things like
 <<picking-up-a-new-property,picking up new properties>> but it's only half the
 fun. `_update_by_query` supports a `script` object to update the document. This
 will increment the `likes` field on all of kimchy's tweets:
 [source,js]
 --------------------------------------------------
 POST /twitter/_update_by_query
@@ -97,7 +98,7 @@ POST /twitter/_update_by_query
 --------------------------------------------------
 // AUTOSENSE
-Just as in {ref}/docs-update.html[Update API] you can set `ctx.op = "noop"` if
+Just as in <<docs-update,Update API>> you can set `ctx.op = "noop"` if
 your script decides that it doesn't have to make any changes. That will cause
 `_update_by_query` to omit that document from its updates. Setting `ctx.op` to
 anything else is an error. If you want to delete by a query you can use the
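A minimal sketch of the `noop` pattern described above; the `inline` script field and the script syntax are assumptions based on the documentation of this era, and the `likes` threshold is purely illustrative:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query
{
  "script": {
    "inline": "if (ctx._source.likes > 100) { ctx.op = 'noop' }"
  },
  "query": {
    "term": {
      "user": "kimchy"
    }
  }
}
--------------------------------------------------

Documents for which the script sets `ctx.op` to `noop` are skipped rather than updated.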
@@ -167,7 +168,7 @@ the Elasticsearch log file. This will be fixed soon.
 `consistency` controls how many copies of a shard must respond to each write
 request. `timeout` controls how long each write request waits for unavailable
 shards to become available. Both work exactly how they work in the
-{ref}/docs-bulk.html[Bulk API].
+<<docs-bulk,Bulk API>>.
 `requests_per_second` can be set to any decimal number (1.4, 6, 1000, etc) and
 throttle the number of requests per second that the update by query issues. The
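As a hypothetical illustration, a throttled update by query can also be started without waiting for it to finish, so it can be followed through the task APIs discussed in the next hunk; `requests_per_second` comes from the prose above and `wait_for_completion` from the REST spec changed later in this commit, and treating both as query-string parameters here is an assumption:

[source,js]
--------------------------------------------------
POST /twitter/_update_by_query?requests_per_second=100&wait_for_completion=false
{
  "query": {
    "term": {
      "user": "kimchy"
    }
  }
}
--------------------------------------------------

Per the REST tests touched in this commit, such a request returns a `task` identifier that can then be polled.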
@@ -232,7 +233,7 @@ from aborting the operation.
 === Works with the Task API
 While Update By Query is running you can fetch their status using the
-{ref}/task/list.html[Task List APIs]:
+<<nodes-task,Nodes Task API>>:
 [source,js]
 --------------------------------------------------
@@ -285,6 +286,7 @@ progress by adding the `updated`, `created`, and `deleted` fields. The request
 will finish when their sum is equal to the `total` field.
+[float]
 [[picking-up-a-new-property]]
 === Pick up a new property
@@ -379,4 +381,4 @@ POST test/_search?filter_path=hits.total
 }
 --------------------------------------------------
-Hurray! You can do the exact same thing when adding a field to a multifield.
+You can do the exact same thing when adding a field to a multifield.

View File

@@ -1,46 +0,0 @@
-[[tasks-list]]
-== Tasks List
-The task management API allows to retrieve information about currently running tasks.
-[source,js]
---------------------------------------------------
-curl -XGET 'http://localhost:9200/_tasks'
-curl -XGET 'http://localhost:9200/_tasks/nodeId1,nodeId2'
-curl -XGET 'http://localhost:9200/_tasks/nodeId1,nodeId2/cluster:*'
---------------------------------------------------
-The first command retrieves all tasks currently running on all nodes.
-The second command selectively retrieves tasks from nodes
-`nodeId1` and `nodeId2`. All the nodes selective options are explained
-<<cluster-nodes,here>>.
-The third command retrieves all cluster-related tasks running on nodes `nodeId1` and `nodeId2`.
-The result will look similar to:
-[source,js]
---------------------------------------------------
-{
-  "nodes" : {
-    "fDlEl7PrQi6F-awHZ3aaDw" : {
-      "name" : "Gazer",
-      "transport_address" : "127.0.0.1:9300",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1:9300",
-      "tasks" : [ {
-        "node" : "fDlEl7PrQi6F-awHZ3aaDw",
-        "id" : 105,
-        "type" : "transport",
-        "action" : "cluster:monitor/nodes/tasks"
-      }, {
-        "node" : "fDlEl7PrQi6F-awHZ3aaDw",
-        "id" : 106,
-        "type" : "direct",
-        "action" : "cluster:monitor/nodes/tasks[n]",
-        "parent_node" : "fDlEl7PrQi6F-awHZ3aaDw",
-        "parent_id" : 105
-      } ]
-    }
-  }
-}
---------------------------------------------------

View File

@@ -1,7 +1,7 @@
 ---
 "no body fails":
   - do:
-      catch: /body required/
+      catch: param
       reindex: {}
 ---

View File

@@ -10,7 +10,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         index: test
   - is_false: timed_out
   - match: {updated: 1}
@@ -35,7 +35,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         wait_for_completion: false
         index: test
   - match: {task: '/.+:\d+/'}
@@ -79,7 +79,7 @@
   - do:
       catch: conflict
-      update-by-query:
+      update_by_query:
         index: test
   - match: {updated: 0}
   - match: {version_conflicts: 1}
@@ -119,7 +119,7 @@
         body: { "text": "test2" }
   - do:
-      update-by-query:
+      update_by_query:
         index: test
         conflicts: proceed
   - match: {updated: 0}
@@ -148,7 +148,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         index: twitter
         body:
           query:
@@ -178,7 +178,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         index: twitter
         size: 1
   - match: {updated: 1}
@@ -218,7 +218,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         index: test
         scroll_size: 1
   - match: {batches: 3}

View File

@@ -8,7 +8,7 @@
         body: { "text": "test" }
   - do:
       catch: /conflicts may only be .* but was \[cat\]/
-      update-by-query:
+      update_by_query:
         index: test
         conflicts: cat
@@ -22,7 +22,7 @@
         body: { "text": "test" }
   - do:
       catch: /Failed to parse int parameter \[scroll_size\] with value \[cat\]/
-      update-by-query:
+      update_by_query:
         index: test
         scroll_size: cat
@@ -36,6 +36,6 @@
         body: { "text": "test" }
   - do:
       catch: /size should be greater than 0 if the request is limited to some number of documents or -1 if it isn't but it was \[-4\]/
-      update-by-query:
+      update_by_query:
         index: test
         size: -4

View File

@@ -43,7 +43,7 @@
   - match: { hits.total: 0 }
   - do:
-      update-by-query:
+      update_by_query:
         index: test
   - do:
       indices.refresh: {}

View File

@@ -1,5 +1,5 @@
 ---
-"update-by-query increments the version number":
+"update_by_query increments the version number":
   - do:
       index:
         index: test
@@ -10,7 +10,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         index: test
   - match: {updated: 1}
   - match: {version_conflicts: 0}

View File

@@ -21,14 +21,14 @@
   - do:
       catch: unavailable
-      update-by-query:
+      update_by_query:
         index: test
         timeout: 1s
   - match:
       failures.0.cause.reason: /Not.enough.active.copies.to.meet.write.consistency.of.\[QUORUM\].\(have.1,.needed.4\)..Timeout\:.\[1s\],.request:.\[BulkShardRequest.to.\[test\].containing.\[1\].requests\]/
   - do:
-      update-by-query:
+      update_by_query:
         index: test
         consistency: one
   - match: {failures: []}

View File

@@ -1,5 +1,5 @@
 ---
-"Update a document using update-by-query":
+"Update a document using update_by_query":
   - do:
       ingest.put_pipeline:
         id: "test_ingest"
@@ -25,7 +25,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         index: twitter
         refresh: true
         pipeline: test_ingest

View File

@@ -29,7 +29,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         index: test
         scroll_size: 1
         requests_per_second: 1

View File

@@ -53,10 +53,6 @@
           "type" : "string",
           "description" : "Specific routing value"
         },
-        "source": {
-          "type" : "string",
-          "description" : "The URL-encoded query definition (instead of using the request body)"
-        },
         "timeout": {
           "type" : "time",
           "description" : "Explicit operation timeout"

View File

@@ -1,5 +1,5 @@
 ---
-"Update a document using update-by-query":
+"Update a document using update_by_query":
   - do:
       index:
         index: twitter
@@ -10,7 +10,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         index: twitter
         refresh: true
         body:
@@ -46,7 +46,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         refresh: true
         index: twitter
         body:
@@ -91,7 +91,7 @@
       indices.refresh: {}
   - do:
-      update-by-query:
+      update_by_query:
         refresh: true
         index: twitter
         body:
@@ -114,7 +114,7 @@
   - do:
       catch: /Invalid fields added to ctx \[junk\]/
-      update-by-query:
+      update_by_query:
         index: twitter
         body:
           script:
@@ -133,7 +133,7 @@
   - do:
       catch: /Modifying \[_id\] not allowed/
-      update-by-query:
+      update_by_query:
         index: twitter
         body:
           script:

View File

@@ -11,7 +11,7 @@
   - do:
       catch: request
-      update-by-query:
+      update_by_query:
         index: twitter
         refresh: true
         body:

View File

@@ -11,7 +11,7 @@
   - do:
       catch: request_timeout
-      update-by-query:
+      update_by_query:
         index: twitter
         refresh: true
         search_timeout: 10ms

View File

@@ -11,7 +11,7 @@
   - do:
       catch: request
-      update-by-query:
+      update_by_query:
         index: source
         body:
           query:

View File

@@ -34,7 +34,8 @@
       }
     },
     "body": {
-      "description": "The search definition using the Query DSL and the prototype for the index request."
+      "description": "The search definition using the Query DSL and the prototype for the index request.",
+      "required": true
     }
   }
 }

View File

@@ -1,5 +1,5 @@
 {
-  "update-by-query": {
+  "update_by_query": {
     "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html",
     "methods": ["POST"],
     "url": {
@@ -192,7 +192,7 @@
       "scroll_size": {
         "type": "integer",
         "defaut_value": 100,
-        "description": "Size on the scroll request powering the update-by-query"
+        "description": "Size on the scroll request powering the update_by_query"
       },
       "wait_for_completion": {
         "type" : "boolean",