Merge branch 'master' into die_cwd_die

commit 6bd69b74f1
@@ -34,6 +34,10 @@ h2. Getting Started
 
 First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasticsearch is all about.
 
+h3. Requirements
+
+You need to have a recent version of Java installed. See the "Setup":http://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html#jvm-version page for more information.
+
 h3. Installation
 
 * "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
@@ -513,7 +513,7 @@ def publish_repositories(version, dry_run=True):
     else:
         print('Triggering repository update - calling dev-tools/build_repositories.sh %s' % version)
         # src_branch is a version like 1.5/1.6/2.0/etc.. so we can use this
-        run('dev-tools/build_repositories.sh %s', src_branch)
+        run('dev-tools/build_repositories.sh %s' % src_branch)
 
 def print_sonatype_notice():
     settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml')
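The fix replaces an argument-separating comma with Python's `%` operator, so the branch is interpolated into the command string before `run` ever sees it. A minimal sketch of the difference (plain Python; the release script's `run` helper is not reproduced here):

[source,python]
--------------------------------------------------
cmd = 'dev-tools/build_repositories.sh %s'
src_branch = '1.5'

# What the fixed line builds: the branch is interpolated into the command.
print(cmd % src_branch)   # dev-tools/build_repositories.sh 1.5

# What the buggy line expressed: two separate values, leaving the literal
# '%s' unformatted in the command string that run() actually received.
print((cmd, src_branch))  # ('dev-tools/build_repositories.sh %s', '1.5')
--------------------------------------------------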
@@ -32,12 +32,12 @@ set -e
 ## GPG_KEY_ID: Key id of your GPG key
 ## AWS_ACCESS_KEY_ID: AWS access key id
 ## AWS_SECRET_ACCESS_KEY: AWS secret access key
-## S3_BUCKET_SYNC_TO Bucket to write packages to, defaults to packages.elasticsearch.org/elasticsearch
+## S3_BUCKET_SYNC_TO Bucket to write packages to, should be set packages.elasticsearch.org for a regular release
 ##
 ##
 ## optional
 ##
-## S3_BUCKET_SYNC_FROM Bucket to read packages from, defaults to packages.elasticsearch.org/elasticsearch
+## S3_BUCKET_SYNC_FROM Bucket to read packages from, defaults to packages.elasticsearch.org
 ## KEEP_DIRECTORIES Allows to keep all the generated directory structures for debugging
 ## GPG_KEYRING Configure GPG keyring home, defaults to ~/.gnupg/
 ##

@@ -51,7 +51,7 @@ set -e
 
 # No trailing slashes!
 if [ -z $S3_BUCKET_SYNC_FROM ] ; then
-  S3_BUCKET_SYNC_FROM="packages.elasticsearch.org/elasticsearch"
+  S3_BUCKET_SYNC_FROM="packages.elasticsearch.org"
 fi
 if [ ! -z $GPG_KEYRING ] ; then
   GPG_HOMEDIR="--homedir ${GPG_KEYRING}"

@@ -156,7 +156,7 @@ centosdir=$tempdir/repository/elasticsearch/$version/centos
 mkdir -p $centosdir
 
 echo "RPM: Syncing repository for version $version into $centosdir"
-$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/$version/centos/ $centosdir
+$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/elasticsearch/$version/centos/ $centosdir
 
 rpm=target/rpm/elasticsearch/RPMS/noarch/elasticsearch*.rpm
 echo "RPM: Copying $rpm into $centosdor"

@@ -191,7 +191,7 @@ mkdir -p $debbasedir
 
 echo "DEB: Syncing debian repository of version $version to $debbasedir"
 # sync all former versions into directory
-$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/$version/debian/ $debbasedir
+$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/elasticsearch/$version/debian/ $debbasedir
 
 # create directories in case of a new release so that syncing did not create this structure
 mkdir -p $debbasedir/dists/stable/main/binary-all
@@ -20,22 +20,3 @@ example:
     }
 }
 --------------------------------------------------
-
-[float]
-[[include-exclude]]
-==== Includes / Excludes
-
-Allow to specify paths in the source that would be included / excluded
-when it's stored, supporting `*` as wildcard annotation. For example:
-
-[source,js]
---------------------------------------------------
-{
-    "my_type" : {
-        "_source" : {
-            "includes" : ["path1.*", "path2.*"],
-            "excludes" : ["path3.*"]
-        }
-    }
-}
---------------------------------------------------

@@ -67,8 +67,3 @@ the fact that the following JSON document is perfectly fine:
 }
 --------------------------------------------------
 
-Note also, that thanks to the fact that we used the `index_name` to use
-the non plural form (`tag` instead of `tags`), we can actually refer to
-the field using the `index_name` as well. For example, we can execute a
-query using `tweet.tags:wow` or `tweet.tag:wow`. We could, of course,
-name the field as `tag` and skip the `index_name` all together).
@@ -270,7 +270,7 @@ to provide special features. They now have limited configuration options.
 * `_field_names` configuration is limited to disabling the field.
 * `_size` configuration is limited to enabling the field.
 
-=== Boolean fields
+==== Boolean fields
 
 Boolean fields used to have a string fielddata with `F` meaning `false` and `T`
 meaning `true`. They have been refactored to use numeric fielddata, with `0`

@@ -302,10 +302,14 @@ the user-friendly representation of boolean fields: `false`/`true`:
 ]
 ---------------
 
-=== Murmur3 Fields
+==== Murmur3 Fields
 Fields of type `murmur3` can no longer change `doc_values` or `index` setting.
 They are always stored with doc values, and not indexed.
 
+==== Source field configuration
+
+The `_source` field no longer supports `includes` and `excludes` paramters. When
+`_source` is enabled, the entire original source will be stored.
+
 === Codecs
 
 It is no longer possible to specify per-field postings and doc values formats

@@ -345,6 +349,11 @@ Deprecated script parameters `id`, `file`, and `scriptField` have been removed
 from all scriptable APIs. `script_id`, `script_file` and `script` should be used
 in their place.
 
+=== Groovy scripts sandbox
+
+The groovy sandbox and related settings have been removed. Groovy is now a non
+sandboxed scripting language, without any option to turn the sandbox on.
+
 === Plugins making use of scripts
 
 Plugins that make use of scripts must register their own script context through

@@ -410,3 +419,26 @@ a single `expand_wildcards` parameter. See <<multi-index,the multi-index docs>>
 The `_shutdown` API has been removed without a replacement. Nodes should be managed via operating
 systems and the provided start/stop scripts.
 
+=== Analyze API
+
+The Analyze API return 0 as first Token's position instead of 1.
+
+=== Multiple data.path striping
+
+Previously, if the `data.path` setting listed multiple data paths, then a
+shard would be ``striped'' across all paths by writing a whole file to each
+path in turn (in accordance with the `index.store.distributor` setting). The
+result was that the files from a single segment in a shard could be spread
+across multiple disks, and the failure of any one disk could corrupt multiple
+shards.
+
+This striping is no longer supported. Instead, different shards may be
+allocated to different paths, but all of the files in a single shard will be
+written to the same path.
+
+If striping is detected while starting Elasticsearch 2.0.0 or later, all of
+the files belonging to the same shard will be migrated to the same path. If
+there is not enough disk space to complete this migration, the upgrade will be
+cancelled and can only be resumed once enough disk space is made available.
+
+The `index.store.distributor` setting has also been removed.
@@ -227,7 +227,7 @@ several attributes, for example:
 [source,js]
 --------------------------------------------------
 curl -XPUT localhost:9200/test/_settings -d '{
-    "index.routing.allocation.include.group1" : "xxx"
+    "index.routing.allocation.include.group1" : "xxx",
     "index.routing.allocation.include.group2" : "yyy",
     "index.routing.allocation.exclude.group3" : "zzz",
     "index.routing.allocation.require.group4" : "aaa"
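The only change in this hunk is the comma added after `"xxx"`, which makes the example settings body valid JSON. A quick sanity check, in plain Python:

[source,python]
--------------------------------------------------
import json

body = '''{
    "index.routing.allocation.include.group1" : "xxx",
    "index.routing.allocation.include.group2" : "yyy",
    "index.routing.allocation.exclude.group3" : "zzz",
    "index.routing.allocation.require.group4" : "aaa"
}'''

# parses cleanly; with the comma missing after "xxx" this raises a ValueError
json.loads(body)
--------------------------------------------------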
@@ -11,26 +11,11 @@ The scripting module uses by default http://groovy.codehaus.org/[groovy]
 scripting language with some extensions. Groovy is used since it is extremely
 fast and very simple to use.
 
-.Groovy dynamic scripting disabled by default from v1.4.3
+.Groovy dynamic scripting off by default from v1.4.3
 [IMPORTANT]
 ===================================================
 
-Elasticsearch versions 1.3.0-1.3.7 and 1.4.0-1.4.2 have a vulnerability in the
-Groovy scripting engine. The vulnerability allows an attacker to construct
-Groovy scripts that escape the sandbox and execute shell commands as the user
-running the Elasticsearch Java VM.
-
-If you are running a vulnerable version of Elasticsearch, you should either
-upgrade to at least v1.3.8 or v1.4.3, or disable dynamic Groovy scripts by
-adding this setting to the `config/elasticsearch.yml` file in all nodes in the
-cluster:
-
-[source,yaml]
------------------------------------
-script.groovy.sandbox.enabled: false
------------------------------------
-
-This will turn off the Groovy sandbox, thus preventing dynamic Groovy scripts
+Groovy dynamic scripting is off by default, preventing dynamic Groovy scripts
 from being accepted as part of a request or retrieved from the special
 `.scripts` index. You will still be able to use Groovy scripts stored in files
 in the `config/scripts/` directory on every node.

@@ -351,39 +336,6 @@ The default scripting language (assuming no `lang` parameter is provided) is
 `groovy`. In order to change it, set the `script.default_lang` to the
 appropriate language.
 
-[float]
-=== Groovy Sandboxing
-
-Elasticsearch sandboxes Groovy scripts that are compiled and executed in order
-to ensure they don't perform unwanted actions. There are a number of options
-that can be used for configuring this sandbox:
-
-`script.groovy.sandbox.receiver_whitelist`::
-
-    Comma-separated list of string classes for objects that may have methods
-    invoked.
-
-`script.groovy.sandbox.package_whitelist`::
-
-    Comma-separated list of packages under which new objects may be constructed.
-
-`script.groovy.sandbox.class_whitelist`::
-
-    Comma-separated list of classes that are allowed to be constructed.
-
-`script.groovy.sandbox.method_blacklist`::
-
-    Comma-separated list of methods that are never allowed to be invoked,
-    regardless of target object.
-
-`script.groovy.sandbox.enabled`::
-
-    Flag to enable the sandbox (defaults to `false` meaning the sandbox is
-    disabled).
-
-When specifying whitelist or blacklist settings for the groovy sandbox, all
-options replace the current whitelist, they are not additive.
-
 [float]
 === Automatic Script Reloading
@@ -175,7 +175,8 @@ doing so would look like:
     "field_value_factor": {
       "field": "popularity",
      "factor": 1.2,
-      "modifier": "sqrt"
+      "modifier": "sqrt",
+      "missing": 1
     }
 --------------------------------------------------
@@ -193,6 +194,8 @@ There are a number of options for the `field_value_factor` function:
 |`modifier` |Modifier to apply to the field value, can be one of: `none`, `log`,
 `log1p`, `log2p`, `ln`, `ln1p`, `ln2p`, `square`, `sqrt`, or `reciprocal`.
 Defaults to `none`.
+|`missing` |Value used if the document doesn't have that field. The modifier
+and factor are still applied to it as though it were read from the document.
 |=======================================================================
 
 Keep in mind that taking the log() of 0, or the square root of a negative number
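The documented arithmetic is easy to sanity-check. The sketch below is plain Python, not Elasticsearch code; it uses the docs' `factor: 1.2` and `sqrt` modifier with a hypothetical popularity value of 16:

[source,python]
--------------------------------------------------
import math

def field_value_factor(value, factor=1.2, modifier=math.sqrt, missing=1):
    # per the docs: when the field is absent, `missing` is used as though
    # it had been read from the document, then factor and modifier apply
    if value is None:
        value = missing
    return modifier(factor * value)

print(field_value_factor(16))    # sqrt(1.2 * 16) ~= 4.38, field present
print(field_value_factor(None))  # sqrt(1.2 * 1)  ~= 1.10, field missing
--------------------------------------------------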
@@ -84,24 +84,28 @@ $ curl -XPUT 'http://localhost:9200/transactions/stock/1' -d '
     "type": "sale",
     "amount": 80
 }
+'
 
 $ curl -XPUT 'http://localhost:9200/transactions/stock/2' -d '
 {
     "type": "cost",
     "amount": 10
 }
+'
 
 $ curl -XPUT 'http://localhost:9200/transactions/stock/3' -d '
 {
     "type": "cost",
     "amount": 30
 }
+'
 
 $ curl -XPUT 'http://localhost:9200/transactions/stock/4' -d '
 {
     "type": "sale",
     "amount": 130
 }
+'
 --------------------------------------------------
 
 Lets say that documents 1 and 3 end up on shard A and documents 2 and 4 end up on shard B. The following is a breakdown of what the aggregation result is
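The breakdown the docs walk through can be reproduced with a small sketch; the shard assignment and amounts come from the quoted example, while `combine` is a hypothetical stand-in for the aggregation's per-shard script:

[source,python]
--------------------------------------------------
# documents 1 and 3 on shard A, documents 2 and 4 on shard B, as in the docs
shard_a = [{"type": "sale", "amount": 80}, {"type": "cost", "amount": 30}]
shard_b = [{"type": "cost", "amount": 10}, {"type": "sale", "amount": 130}]

def combine(docs):
    # per-shard partial result: sales count positive, costs negative
    return sum(d["amount"] if d["type"] == "sale" else -d["amount"]
               for d in docs)

partials = [combine(shard_a), combine(shard_b)]  # [50, 120]
print(sum(partials))                             # 170, the reduced result
--------------------------------------------------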
@@ -18,30 +18,22 @@ on the node. Can hold multiple locations. | {path.home}/data| path.data
 | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | {path.home}/plugins | path.plugins
 |=======================================================================
 
-The multiple data locations allows to stripe it. The striping is simple,
-placing whole files in one of the locations, and deciding where to place
-the file based on the value of the `index.store.distributor` setting:
+Multiple `data` paths may be specified, in order to spread data across
+multiple disks or locations, but all of the files from a single shard will be
+written to the same path. This can be configured as follows:
 
-* `least_used` (default) always selects the directory with the most
-available space +
-* `random` selects directories at random. The probability of selecting
-a particular directory is proportional to amount of available space in
-this directory.
-
-Note, there are no multiple copies of the same data, in that, its
-similar to RAID 0. Though simple, it should provide a good solution for
-people that don't want to mess with RAID. Here is how it is configured:
-
 ---------------------------------
 path.data: /mnt/first,/mnt/second
 ---------------------------------
 
-Or the in an array format:
+Or in an array format:
 
 ----------------------------------------
 path.data: ["/mnt/first", "/mnt/second"]
 ----------------------------------------
 
+TIP: To stripe shards across multiple disks, please use a RAID driver
+instead.
+
 [float]
 [[default-paths]]
pom.xml
@@ -73,7 +73,7 @@
       <repository>
         <id>lucene-snapshots</id>
         <name>Lucene Snapshots</name>
-        <url>https://download.elastic.co/lucenesnapshots/${lucene.snapshot.revision}</url>
+        <url>http://download.elastic.co/lucenesnapshots/${lucene.snapshot.revision}</url>
       </repository>
     </repositories>
 
@@ -41,10 +41,6 @@
       "routing": {
         "type" : "string",
        "description" : "Specific routing value"
-      },
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded query definition (instead of using the request body)"
       }
     }
   },

@@ -23,10 +23,6 @@
       }
     },
     "params": {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "routing": {
         "type": "list",
         "description": "A comma-separated list of specific routing values"
@@ -1,75 +0,0 @@
-{
-  "delete_by_query": {
-    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html",
-    "methods": ["DELETE"],
-    "url": {
-      "path": "/{index}/_query",
-      "paths": ["/{index}/_query", "/{index}/{type}/_query"],
-      "parts": {
-        "index": {
-          "type" : "list",
-          "required": true,
-          "description" : "A comma-separated list of indices to restrict the operation; use `_all` to perform the operation on all indices"
-        },
-        "type": {
-          "type" : "list",
-          "description" : "A comma-separated list of types to restrict the operation"
-        }
-      },
-      "params": {
-        "analyzer": {
-          "type" : "string",
-          "description" : "The analyzer to use for the query string"
-        },
-        "consistency": {
-          "type" : "enum",
-          "options" : ["one", "quorum", "all"],
-          "description" : "Specific write consistency setting for the operation"
-        },
-        "default_operator": {
-          "type" : "enum",
-          "options" : ["AND","OR"],
-          "default" : "OR",
-          "description" : "The default operator for query string query (AND or OR)"
-        },
-        "df": {
-          "type" : "string",
-          "description" : "The field to use as default where no field prefix is given in the query string"
-        },
-        "ignore_unavailable": {
-          "type" : "boolean",
-          "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
-        },
-        "allow_no_indices": {
-          "type" : "boolean",
-          "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
-        },
-        "expand_wildcards": {
-          "type" : "enum",
-          "options" : ["open","closed","none","all"],
-          "default" : "open",
-          "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
-        },
-        "q": {
-          "type" : "string",
-          "description" : "Query in the Lucene query string syntax"
-        },
-        "routing": {
-          "type" : "string",
-          "description" : "Specific routing value"
-        },
-        "source": {
-          "type" : "string",
-          "description" : "The URL-encoded query definition (instead of using the request body)"
-        },
-        "timeout": {
-          "type" : "time",
-          "description" : "Explicit operation timeout"
-        }
-      }
-    },
-    "body": {
-      "description" : "A query to restrict the operation specified with the Query DSL"
-    }
-  }
-}
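Together with the test removal further below, this drops delete-by-query from core (in 2.0 it lives in a separate plugin). For a client-side approximation of the old behaviour, here is a rough sketch assuming a local 2.x cluster and the Python `requests` package; the index name and query are placeholders, and only the first scroll page is handled for brevity:

[source,python]
--------------------------------------------------
import json
import requests

ES = "http://localhost:9200"

# find matching documents (a full version would loop on _scroll_id)
resp = requests.get(ES + "/test_1/_search",
                    params={"scroll": "1m", "size": 500},
                    json={"query": {"match": {"foo": "bar"}}}).json()

# turn the hits into newline-delimited bulk delete actions
actions = "".join(json.dumps({"delete": {"_index": h["_index"],
                                         "_type": h["_type"],
                                         "_id": h["_id"]}}) + "\n"
                  for h in resp["hits"]["hits"])
if actions:
    requests.post(ES + "/_bulk", data=actions)
--------------------------------------------------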
@@ -69,10 +69,6 @@
         "type" : "string",
         "description" : "Specific routing value"
       },
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded query definition (instead of using the request body)"
-      },
       "_source": {
         "type" : "list",
         "description" : "True or false to return the _source field or not, or a list of fields to return"

@@ -12,10 +12,6 @@
       }
     },
     "params": {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "analyzer": {
         "type" : "string",
         "description" : "The name of the analyzer to use"

@@ -37,10 +37,6 @@
       "operation_threading": {
         "description" : "TODO: ?"
       },
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded query definition (instead of using the request body)"
-      },
       "q": {
         "type" : "string",
         "description" : "Query in the Lucene query string syntax"

@@ -16,10 +16,6 @@
       }
     },
     "params": {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "fields": {
         "type": "list",
         "description" : "A comma-separated list of fields to return in the response"

@@ -23,10 +23,6 @@
       }
     },
     "params": {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "boost_terms": {
         "type" : "number",
         "description" : "The boost factor"

@@ -16,10 +16,6 @@
       }
     },
     "params": {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "ignore_unavailable": {
         "type": "boolean",
         "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)"

@@ -16,10 +16,6 @@
       }
     },
     "params": {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "search_type": {
         "type" : "enum",
         "options" : ["query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", "dfs_query_and_fetch", "count", "scan"],

@@ -16,10 +16,6 @@
       }
     },
     "params" : {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "ids" : {
         "type" : "list",
         "description" : "A comma-separated list of documents ids. You must define ids as parameter or set \"ids\" or \"docs\" in the request body",

@@ -97,4 +93,4 @@
 
     }
   }
 }

@@ -23,10 +23,6 @@
       }
     },
     "params": {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "routing": {
         "type" : "list",
         "description" : "A comma-separated list of specific routing values"

@@ -12,10 +12,6 @@
       }
     },
     "params": {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "scroll": {
         "type" : "duration",
         "description" : "Specify how long a consistent view of the index should be maintained for scrolled search"

@@ -101,10 +101,6 @@
         "type" : "list",
         "description" : "A comma-separated list of <field>:<direction> pairs"
       },
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition using the Query DSL (instead of using request body)"
-      },
       "_source": {
         "type" : "list",
         "description" : "True or false to return the _source field or not, or a list of fields to return"

@@ -41,10 +41,6 @@
       "routing": {
         "type" : "string",
         "description" : "Specific routing value"
-      },
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded query definition (instead of using the request body)"
       }
     }
   },

@@ -16,10 +16,6 @@
       }
     },
     "params" : {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "ignore_unavailable": {
         "type" : "boolean",
         "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"

@@ -33,10 +33,6 @@
       "routing": {
         "type" : "string",
         "description" : "Specific routing value"
-      },
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition (instead of using request body)"
       }
     }
   },

@@ -22,10 +22,6 @@
       }
     },
     "params": {
-      "source": {
-        "type" : "string",
-        "description" : "The URL-encoded request definition"
-      },
       "term_statistics" : {
         "type" : "boolean",
         "description" : "Specifies if total term frequency and document frequency should be returned.",
@@ -1,42 +0,0 @@
----
-"Basic delete_by_query":
-  - do:
-      index:
-          index: test_1
-          type: test
-          id: 1
-          body: { foo: bar }
-
-  - do:
-      index:
-          index: test_1
-          type: test
-          id: 2
-          body: { foo: baz }
-
-  - do:
-      index:
-          index: test_1
-          type: test
-          id: 3
-          body: { foo: foo }
-
-  - do:
-      indices.refresh: {}
-
-  - do:
-      delete_by_query:
-        index: test_1
-        body:
-          query:
-            match:
-              foo: bar
-
-  - do:
-      indices.refresh: {}
-
-  - do:
-      count:
-        index: test_1
-
-  - match: { count: 2 }
@@ -21,7 +21,6 @@ package org.apache.lucene.analysis;
 
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 
 import java.io.IOException;
 import java.util.Collections;

@@ -97,7 +96,7 @@ public class PrefixAnalyzer extends Analyzer {
             this.currentPrefix = null;
             this.separator = separator;
             if (prefixes == null || !prefixes.iterator().hasNext()) {
-                throw new ElasticsearchIllegalArgumentException("one or more prefixes needed");
+                throw new IllegalArgumentException("one or more prefixes needed");
             }
         }
 

@@ -19,7 +19,6 @@
 package org.apache.lucene.store;
 
 import org.apache.lucene.store.RateLimiter.SimpleRateLimiter;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.unit.ByteSizeValue;
 

@@ -42,7 +41,7 @@ public class StoreRateLimiting {
         MERGE,
         ALL;
 
-        public static Type fromString(String type) throws ElasticsearchIllegalArgumentException {
+        public static Type fromString(String type) {
             if ("none".equalsIgnoreCase(type)) {
                 return NONE;
             } else if ("merge".equalsIgnoreCase(type)) {

@@ -50,7+49,7 @@ public class StoreRateLimiting {
             } else if ("all".equalsIgnoreCase(type)) {
                 return ALL;
             }
-            throw new ElasticsearchIllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]");
+            throw new IllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]");
         }
     }
 

@@ -88,7 +87,7 @@ public class StoreRateLimiting {
         this.type = type;
     }
 
-    public void setType(String type) throws ElasticsearchIllegalArgumentException {
+    public void setType(String type) {
         this.type = Type.fromString(type);
     }
 }
@@ -194,7 +194,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         if (this instanceof ElasticsearchWrapperException) {
             toXContent(builder, params, this);
         } else {
-            builder.field("type", getExceptionName(this));
+            builder.field("type", getExceptionName());
             builder.field("reason", getMessage());
             innerToXContent(builder, params);
         }

@@ -261,7 +261,16 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         if (ex instanceof ElasticsearchException) {
             return ((ElasticsearchException) ex).guessRootCauses();
         }
-        return new ElasticsearchException[0];
+        return new ElasticsearchException[] {new ElasticsearchException(t.getMessage(), t) {
+            @Override
+            protected String getExceptionName() {
+                return getExceptionName(getCause());
+            }
+        }};
+    }
+
+    protected String getExceptionName() {
+        return getExceptionName(this);
     }
 
     /**
@@ -1,45 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch;
-
-import org.elasticsearch.rest.RestStatus;
-
-/**
- *
- */
-public class ElasticsearchIllegalArgumentException extends ElasticsearchException {
-
-    public ElasticsearchIllegalArgumentException() {
-        super(null);
-    }
-
-    public ElasticsearchIllegalArgumentException(String msg) {
-        super(msg);
-    }
-
-    public ElasticsearchIllegalArgumentException(String msg, Throwable cause) {
-        super(msg, cause);
-    }
-
-    @Override
-    public RestStatus status() {
-        return RestStatus.BAD_REQUEST;
-    }
-}

@@ -1,38 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch;
-
-/**
- *
- */
-public class ElasticsearchIllegalStateException extends ElasticsearchException {
-
-    public ElasticsearchIllegalStateException() {
-        super(null);
-    }
-
-    public ElasticsearchIllegalStateException(String msg) {
-        super(msg);
-    }
-
-    public ElasticsearchIllegalStateException(String msg, Throwable cause) {
-        super(msg, cause);
-    }
-}
@@ -459,12 +459,12 @@ public class Version {
     /**
      * Return the {@link Version} of Elasticsearch that has been used to create an index given its settings.
      *
-     * @throws ElasticsearchIllegalStateException if the given index settings doesn't contain a value for the key {@value IndexMetaData#SETTING_VERSION_CREATED}
+     * @throws IllegalStateException if the given index settings doesn't contain a value for the key {@value IndexMetaData#SETTING_VERSION_CREATED}
      */
     public static Version indexCreated(Settings indexSettings) {
         final Version indexVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null);
         if (indexVersion == null) {
-            throw new ElasticsearchIllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_UUID) + "]");
+            throw new IllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_UUID) + "]");
         }
         return indexVersion;
     }
@@ -35,29 +35,29 @@ public interface ActionFuture<T> extends Future<T> {
 
     /**
      * Similar to {@link #get()}, just catching the {@link InterruptedException} and throwing
-     * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches
+     * an {@link IllegalStateException} instead. Also catches
      * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
      * <p/>
      * <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
      * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is
      * still accessible using {@link #getRootFailure()}.
      */
-    T actionGet() throws ElasticsearchException;
+    T actionGet();
 
     /**
      * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing
-     * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches
+     * an {@link IllegalStateException} instead. Also catches
      * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
      * <p/>
     * <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
      * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is
      * still accessible using {@link #getRootFailure()}.
      */
-    T actionGet(String timeout) throws ElasticsearchException;
+    T actionGet(String timeout);
 
     /**
      * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing
-     * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches
+     * an {@link IllegalStateException} instead. Also catches
      * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
      * <p/>
      * <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped

@@ -66,29 +66,29 @@ public interface ActionFuture<T> extends Future<T> {
      *
      * @param timeoutMillis Timeout in millis
      */
-    T actionGet(long timeoutMillis) throws ElasticsearchException;
+    T actionGet(long timeoutMillis);
 
     /**
      * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing
-     * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches
+     * an {@link IllegalStateException} instead. Also catches
      * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
      * <p/>
      * <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
      * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is
      * still accessible using {@link #getRootFailure()}.
      */
-    T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException;
+    T actionGet(long timeout, TimeUnit unit);
 
     /**
      * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing
-     * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches
+     * an {@link IllegalStateException} instead. Also catches
      * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
      * <p/>
      * <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
      * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is
      * still accessible using {@link #getRootFailure()}.
      */
-    T actionGet(TimeValue timeout) throws ElasticsearchException;
+    T actionGet(TimeValue timeout);
 
     /**
      * The root (possibly) wrapped failure.
@@ -124,10 +124,6 @@ import org.elasticsearch.action.count.CountAction;
 import org.elasticsearch.action.count.TransportCountAction;
 import org.elasticsearch.action.delete.DeleteAction;
 import org.elasticsearch.action.delete.TransportDeleteAction;
-import org.elasticsearch.action.deletebyquery.DeleteByQueryAction;
-import org.elasticsearch.action.deletebyquery.TransportDeleteByQueryAction;
-import org.elasticsearch.action.deletebyquery.TransportIndexDeleteByQueryAction;
-import org.elasticsearch.action.deletebyquery.TransportShardDeleteByQueryAction;
 import org.elasticsearch.action.exists.ExistsAction;
 import org.elasticsearch.action.exists.TransportExistsAction;
 import org.elasticsearch.action.explain.ExplainAction;

@@ -284,8 +280,6 @@ public class ActionModule extends AbstractModule {
                 TransportShardMultiGetAction.class);
         registerAction(BulkAction.INSTANCE, TransportBulkAction.class,
                 TransportShardBulkAction.class);
-        registerAction(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class,
-                TransportIndexDeleteByQueryAction.class, TransportShardDeleteByQueryAction.class);
         registerAction(SearchAction.INSTANCE, TransportSearchAction.class,
                 TransportSearchDfsQueryThenFetchAction.class,
                 TransportSearchQueryThenFetchAction.class,
@@ -69,21 +69,21 @@ public abstract class ActionRequestBuilder<Request extends ActionRequest, Respon
     /**
      * Short version of execute().actionGet().
      */
-    public Response get() throws ElasticsearchException {
+    public Response get() {
         return execute().actionGet();
     }
 
     /**
      * Short version of execute().actionGet().
      */
-    public Response get(TimeValue timeout) throws ElasticsearchException {
+    public Response get(TimeValue timeout) {
         return execute().actionGet(timeout);
     }
 
     /**
      * Short version of execute().actionGet().
      */
-    public Response get(String timeout) throws ElasticsearchException {
+    public Response get(String timeout) {
         return execute().actionGet(timeout);
     }
 
@@ -19,7 +19,8 @@
 
 package org.elasticsearch.action;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchException;
+
 
 import java.util.ArrayList;
 import java.util.List;

@@ -27,12 +28,12 @@ import java.util.List;
 /**
  *
  */
-public class ActionRequestValidationException extends ElasticsearchIllegalArgumentException {
+public class ActionRequestValidationException extends IllegalArgumentException {
 
     private final List<String> validationErrors = new ArrayList<>();
 
     public ActionRequestValidationException() {
-        super(null);
+        super("validation failed");
     }
 
     public void addValidationError(String error) {
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 
 /**
  *

@@ -108,7 +107,7 @@ public enum ThreadingModel {
         } else if (id == 3) {
             return OPERATION_LISTENER;
         } else {
-            throw new ElasticsearchIllegalArgumentException("No threading model for [" + id + "]");
+            throw new IllegalArgumentException("No threading model for [" + id + "]");
         }
     }
 }
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 
 /**
  * Write Consistency Level control how many replicas should be active for a write operation to occur (a write operation

@@ -53,7 +52,7 @@ public enum WriteConsistencyLevel {
         } else if (value == 3) {
             return ALL;
         }
-        throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]");
+        throw new IllegalArgumentException("No write consistency match [" + value + "]");
     }
 
     public static WriteConsistencyLevel fromString(String value) {

@@ -66,6 +65,6 @@ public enum WriteConsistencyLevel {
         } else if (value.equals("all")) {
             return ALL;
         }
-        throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]");
+        throw new IllegalArgumentException("No write consistency match [" + value + "]");
     }
 }
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.health;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 
 /**
  *

@@ -48,7 +47,7 @@ public enum ClusterHealthStatus {
             case 2:
                 return RED;
             default:
-                throw new ElasticsearchIllegalArgumentException("No cluster health status for value [" + value + "]");
+                throw new IllegalArgumentException("No cluster health status for value [" + value + "]");
         }
     }
 }
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.cluster.health;

 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -66,7 +65,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati
     }

     @Override
-    protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
         if (request.waitForEvents() != null) {
             final long endTime = System.currentTimeMillis() + request.timeout().millis();
             clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ProcessedClusterStateUpdateTask() {
@@ -141,7 +140,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati

                 @Override
                 public void onClusterServiceClose() {
-                    listener.onFailure(new ElasticsearchIllegalStateException("ClusterService was close during health call"));
+                    listener.onFailure(new IllegalStateException("ClusterService was close during health call"));
                 }

                 @Override
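The signature changes in these transport actions rely on a basic rule of Java overriding: an overriding method may declare fewer exceptions than the method it overrides, and unchecked exceptions never need to be declared at all. Since ElasticsearchException extends RuntimeException, the throws clause was purely cosmetic and can be dropped without touching callers. An illustrative sketch (the class names here are not the real ES hierarchy):

    abstract class DemoBaseTransportAction {
        protected abstract void masterOperation(String request) throws RuntimeException;
    }

    class DemoHealthAction extends DemoBaseTransportAction {
        @Override
        protected void masterOperation(String request) { // throws clause dropped
            // unchecked exceptions may still propagate from here undeclared
        }
    }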
@@ -73,7 +73,7 @@ public class TransportNodesHotThreadsAction extends TransportNodesOperationActio
     }

     @Override
-    protected NodeHotThreads nodeOperation(NodeRequest request) throws ElasticsearchException {
+    protected NodeHotThreads nodeOperation(NodeRequest request) {
         HotThreads hotThreads = new HotThreads()
                 .busiestThreads(request.request.threads)
                 .type(request.request.type)
@@ -77,7 +77,7 @@ public class TransportNodesInfoAction extends TransportNodesOperationAction<Node
     }

     @Override
-    protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) throws ElasticsearchException {
+    protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) {
         NodesInfoRequest request = nodeRequest.request;
         return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(),
                 request.network(), request.transport(), request.http(), request.plugins());
@@ -77,7 +77,7 @@ public class TransportNodesStatsAction extends TransportNodesOperationAction<Nod
     }

     @Override
-    protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) throws ElasticsearchException {
+    protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) {
         NodesStatsRequest request = nodeStatsRequest.request;
         return nodeService.stats(request.indices(), request.os(), request.process(), request.jvm(), request.threadPool(), request.network(),
                 request.fs(), request.transport(), request.http(), request.breaker());
@@ -64,7 +64,7 @@ public class TransportDeleteRepositoryAction extends TransportMasterNodeOperatio
     }

     @Override
-    protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state, final ActionListener<DeleteRepositoryResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state, final ActionListener<DeleteRepositoryResponse> listener) {
         repositoriesService.unregisterRepository(
                 new RepositoriesService.UnregisterRepositoryRequest("delete_repository [" + request.name() + "]", request.name())
                         .masterNodeTimeout(request.masterNodeTimeout()).ackTimeout(request.timeout()),
@@ -64,7 +64,7 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadOpera
     }

     @Override
-    protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener<GetRepositoriesResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener<GetRepositoriesResponse> listener) {
         MetaData metaData = state.metaData();
         RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
         if (request.repositories().length == 0 || (request.repositories().length == 1 && "_all".equals(request.repositories()[0]))) {
@@ -20,8 +20,7 @@
 package org.elasticsearch.action.admin.cluster.repositories.put;

 import org.elasticsearch.ElasticsearchGenerationException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -218,7 +217,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
                 type(entry.getValue().toString());
             } else if (name.equals("settings")) {
                 if (!(entry.getValue() instanceof Map)) {
-                    throw new ElasticsearchIllegalArgumentException("Malformed settings section, should include an inner object");
+                    throw new IllegalArgumentException("Malformed settings section, should include an inner object");
                 }
                 settings((Map<String, Object>) entry.getValue());
             }
@@ -236,7 +235,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
         try {
             return source(XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition).mapOrderedAndClose());
         } catch (IOException e) {
-            throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + repositoryDefinition + "]", e);
+            throw new IllegalArgumentException("failed to parse repository source [" + repositoryDefinition + "]", e);
         }
     }

@@ -260,7 +259,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
         try {
             return source(XContentFactory.xContent(repositoryDefinition, offset, length).createParser(repositoryDefinition, offset, length).mapOrderedAndClose());
         } catch (IOException e) {
-            throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
+            throw new IllegalArgumentException("failed to parse repository source", e);
         }
     }

@@ -274,7 +273,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
         try {
             return source(XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition).mapOrderedAndClose());
         } catch (IOException e) {
-            throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
+            throw new IllegalArgumentException("failed to parse template source", e);
        }
     }

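The PutRepositoryRequest.source(...) overloads above all share one shape: parse the definition, and wrap any checked parsing failure in an unchecked IllegalArgumentException that keeps the cause. A self-contained sketch of that shape, with a hypothetical stand-in for the XContent parsing call:

    import java.io.IOException;

    final class ParseAndWrapDemo {
        static String parseRepositoryDefinition(String definition) {
            try {
                return doParse(definition); // stand-in for XContentFactory.xContent(...).createParser(...)
            } catch (IOException e) {
                // the cause is preserved, so the original parse error stays visible
                throw new IllegalArgumentException("failed to parse repository source [" + definition + "]", e);
            }
        }

        private static String doParse(String definition) throws IOException {
            if (definition == null || definition.isEmpty()) {
                throw new IOException("no content to parse");
            }
            return definition;
        }
    }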
@@ -64,7 +64,7 @@ public class TransportPutRepositoryAction extends TransportMasterNodeOperationAc
     }

     @Override
-    protected void masterOperation(final PutRepositoryRequest request, ClusterState state, final ActionListener<PutRepositoryResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final PutRepositoryRequest request, ClusterState state, final ActionListener<PutRepositoryResponse> listener) {

         repositoriesService.registerRepository(
                 new RepositoriesService.RegisterRepositoryRequest("put_repository [" + request.name() + "]",
@@ -68,7 +68,7 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeOperatio
     }

     @Override
-    protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, final ActionListener<VerifyRepositoryResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, final ActionListener<VerifyRepositoryResponse> listener) {
         repositoriesService.verifyRepository(request.name(), new ActionListener<RepositoriesService.VerifyResponse>() {
             @Override
             public void onResponse(RepositoriesService.VerifyResponse verifyResponse) {
@@ -67,7 +67,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeOperationA
     }

     @Override
-    protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) {
         clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterRerouteResponse>(request, listener) {

             private volatile ClusterState clusterStateToSend;
@@ -86,7 +86,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeOpe
     }

     @Override
-    protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener<ClusterUpdateSettingsResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener<ClusterUpdateSettingsResponse> listener) {
         final ImmutableSettings.Builder transientUpdates = ImmutableSettings.settingsBuilder();
         final ImmutableSettings.Builder persistentUpdates = ImmutableSettings.settingsBuilder();

@@ -19,7 +19,6 @@

 package org.elasticsearch.action.admin.cluster.shards;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -61,11 +60,11 @@ public class ClusterSearchShardsRequest extends MasterNodeReadOperationRequest<C
     @Override
     public ClusterSearchShardsRequest indices(String... indices) {
         if (indices == null) {
-            throw new ElasticsearchIllegalArgumentException("indices must not be null");
+            throw new IllegalArgumentException("indices must not be null");
         } else {
             for (int i = 0; i < indices.length; i++) {
                 if (indices[i] == null) {
-                    throw new ElasticsearchIllegalArgumentException("indices[" + i + "] must not be null");
+                    throw new IllegalArgumentException("indices[" + i + "] must not be null");
                 }
             }
         }
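The indices(...) setter shows the same exception swap in an argument-validation context. A minimal standalone version of that check:

    final class IndicesCheckDemo {
        static String[] checkIndices(String... indices) {
            if (indices == null) {
                throw new IllegalArgumentException("indices must not be null");
            }
            for (int i = 0; i < indices.length; i++) {
                if (indices[i] == null) {
                    throw new IllegalArgumentException("indices[" + i + "] must not be null");
                }
            }
            return indices;
        }
    }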
@@ -67,7 +67,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadO
     }

     @Override
-    protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) {
         ClusterState clusterState = clusterService.state();
         String[] concreteIndices = clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices());
         Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.cluster.snapshots.create;

 import org.elasticsearch.ElasticsearchGenerationException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -379,13 +378,13 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
             } else if (entry.getValue() instanceof ArrayList) {
                 indices((ArrayList<String>) entry.getValue());
             } else {
-                throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings");
+                throw new IllegalArgumentException("malformed indices section, should be an array of strings");
             }
         } else if (name.equals("partial")) {
             partial(nodeBooleanValue(entry.getValue()));
         } else if (name.equals("settings")) {
             if (!(entry.getValue() instanceof Map)) {
-                throw new ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object");
+                throw new IllegalArgumentException("malformed settings section, should indices an inner object");
             }
             settings((Map<String, Object>) entry.getValue());
         } else if (name.equals("include_global_state")) {
@@ -407,7 +406,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
         try {
             return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
         } catch (Exception e) {
-            throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + source + "]", e);
+            throw new IllegalArgumentException("failed to parse repository source [" + source + "]", e);
         }
     }
     return this;
@@ -436,7 +435,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
         try {
             return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
         } catch (IOException e) {
-            throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
+            throw new IllegalArgumentException("failed to parse repository source", e);
         }
     }
     return this;
@@ -452,7 +451,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
         try {
             return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
         } catch (IOException e) {
-            throw new ElasticsearchIllegalArgumentException("failed to parse snapshot source", e);
+            throw new IllegalArgumentException("failed to parse snapshot source", e);
         }
     }

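CreateSnapshotRequest.source(Map) walks a parsed map and rejects sections whose values have the wrong shape. A hedged sketch of that validation style (the class and method names here are illustrative, not the ES API):

    import java.util.Map;

    final class SectionShapeDemo {
        @SuppressWarnings("unchecked")
        static Map<String, Object> settingsSection(Object value) {
            if (!(value instanceof Map)) {
                throw new IllegalArgumentException("malformed settings section, should include an inner object");
            }
            return (Map<String, Object>) value;
        }
    }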
@@ -64,7 +64,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeOperationA
     }

     @Override
-    protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener<CreateSnapshotResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener<CreateSnapshotResponse> listener) {
         SnapshotsService.SnapshotRequest snapshotRequest =
                 new SnapshotsService.SnapshotRequest("create_snapshot[" + request.snapshot() + "]", request.snapshot(), request.repository())
                         .indices(request.indices())
@@ -63,7 +63,7 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeOperationA
     }

     @Override
-    protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener<DeleteSnapshotResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener<DeleteSnapshotResponse> listener) {
         SnapshotId snapshotIds = new SnapshotId(request.repository(), request.snapshot());
         snapshotsService.deleteSnapshot(snapshotIds, new SnapshotsService.DeleteSnapshotListener() {
             @Override
@@ -66,7 +66,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeOperationAct
     }

     @Override
-    protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener<GetSnapshotsResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener<GetSnapshotsResponse> listener) {
         try {
             ImmutableList.Builder<SnapshotInfo> snapshotInfoBuilder = ImmutableList.builder();
             if (isAllSnapshots(request.snapshots())) {
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.cluster.snapshots.restore;

 import org.elasticsearch.ElasticsearchGenerationException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -493,7 +492,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
         try {
             return source(source.bytes());
         } catch (Exception e) {
-            throw new ElasticsearchIllegalArgumentException("Failed to build json for repository request", e);
+            throw new IllegalArgumentException("Failed to build json for repository request", e);
         }
     }

@@ -512,13 +511,13 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
             } else if (entry.getValue() instanceof ArrayList) {
                 indices((ArrayList<String>) entry.getValue());
             } else {
-                throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings");
+                throw new IllegalArgumentException("malformed indices section, should be an array of strings");
             }
         } else if (name.equals("partial")) {
             partial(nodeBooleanValue(entry.getValue()));
         } else if (name.equals("settings")) {
             if (!(entry.getValue() instanceof Map)) {
-                throw new ElasticsearchIllegalArgumentException("malformed settings section");
+                throw new IllegalArgumentException("malformed settings section");
             }
             settings((Map<String, Object>) entry.getValue());
         } else if (name.equals("include_global_state")) {
@@ -529,17 +528,17 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
             if (entry.getValue() instanceof String) {
                 renamePattern((String) entry.getValue());
             } else {
-                throw new ElasticsearchIllegalArgumentException("malformed rename_pattern");
+                throw new IllegalArgumentException("malformed rename_pattern");
             }
         } else if (name.equals("rename_replacement")) {
             if (entry.getValue() instanceof String) {
                 renameReplacement((String) entry.getValue());
             } else {
-                throw new ElasticsearchIllegalArgumentException("malformed rename_replacement");
+                throw new IllegalArgumentException("malformed rename_replacement");
             }
         } else if (name.equals("index_settings")) {
             if (!(entry.getValue() instanceof Map)) {
-                throw new ElasticsearchIllegalArgumentException("malformed index_settings section");
+                throw new IllegalArgumentException("malformed index_settings section");
             }
             indexSettings((Map<String, Object>) entry.getValue());
         } else if (name.equals("ignore_index_settings")) {
@@ -548,10 +547,10 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
             } else if (entry.getValue() instanceof List) {
                 ignoreIndexSettings((List<String>) entry.getValue());
             } else {
-                throw new ElasticsearchIllegalArgumentException("malformed ignore_index_settings section, should be an array of strings");
+                throw new IllegalArgumentException("malformed ignore_index_settings section, should be an array of strings");
             }
         } else {
-            throw new ElasticsearchIllegalArgumentException("Unknown parameter " + name);
+            throw new IllegalArgumentException("Unknown parameter " + name);
         }
     }
     indicesOptions(IndicesOptions.fromMap((Map<String, Object>) source, IndicesOptions.lenientExpandOpen()));
@@ -571,7 +570,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
         try {
             return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
         } catch (Exception e) {
-            throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + source + "]", e);
+            throw new IllegalArgumentException("failed to parse repository source [" + source + "]", e);
         }
     }
     return this;
@@ -604,7 +603,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
         try {
             return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
         } catch (IOException e) {
-            throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
+            throw new IllegalArgumentException("failed to parse repository source", e);
         }
     }
     return this;
@@ -622,7 +621,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
         try {
             return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
         } catch (IOException e) {
-            throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
+            throw new IllegalArgumentException("failed to parse template source", e);
        }
     }

@@ -70,7 +70,7 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeOperation
     }

     @Override
-    protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener<RestoreSnapshotResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener<RestoreSnapshotResponse> listener) {
         RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest(
                 "restore_snapshot[" + request.snapshot() + "]", request.repository(), request.snapshot(),
                 request.indices(), request.indicesOptions(), request.renamePattern(), request.renameReplacement(),
@@ -19,7 +19,6 @@

 package org.elasticsearch.action.admin.cluster.snapshots.status;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;

 /**
  */
@@ -92,7 +91,7 @@ public enum SnapshotIndexShardStage {
             case 4:
                 return FAILURE;
             default:
-                throw new ElasticsearchIllegalArgumentException("No snapshot shard stage for value [" + value + "]");
+                throw new IllegalArgumentException("No snapshot shard stage for value [" + value + "]");
         }
     }
 }
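The snapshot status enums use a switch rather than a lookup loop for their byte-to-constant conversions; the default arm is where the exception swap lands. A sketch with a hypothetical stage enum:

    public enum DemoShardStage {
        INIT, STARTED, FINALIZE, DONE, FAILURE;

        public static DemoShardStage fromValue(byte value) {
            switch (value) {
                case 0: return INIT;
                case 1: return STARTED;
                case 2: return FINALIZE;
                case 3: return DONE;
                case 4: return FAILURE;
                default:
                    throw new IllegalArgumentException("No snapshot shard stage for value [" + value + "]");
            }
        }
    }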
@@ -19,7 +19,6 @@

 package org.elasticsearch.action.admin.cluster.snapshots.status;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -75,7 +74,7 @@ public class SnapshotIndexShardStatus extends BroadcastShardOperationResponse im
             stage = SnapshotIndexShardStage.FAILURE;
             break;
         default:
-            throw new ElasticsearchIllegalArgumentException("Unknown stage type " + indexShardStatus.stage());
+            throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.stage());
         }
         stats = new SnapshotStats(indexShardStatus);
         failure = indexShardStatus.failure();
@@ -19,7 +19,6 @@

 package org.elasticsearch.action.admin.cluster.snapshots.status;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
@@ -59,7 +58,7 @@ public class SnapshotShardsStats implements ToXContent {
                 failedShards++;
                 break;
             default:
-                throw new ElasticsearchIllegalArgumentException("Unknown stage type " + shard.getStage());
+                throw new IllegalArgumentException("Unknown stage type " + shard.getStage());
             }
         }
     }
@@ -94,7 +94,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesOperationAction
     }

     @Override
-    protected NodeSnapshotStatus nodeOperation(NodeRequest request) throws ElasticsearchException {
+    protected NodeSnapshotStatus nodeOperation(NodeRequest request) {
         ImmutableMap.Builder<SnapshotId, ImmutableMap<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = ImmutableMap.builder();
         try {
             String nodeId = clusterService.localNode().id();
@@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status;

 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
@@ -180,7 +179,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeOperation
                     stage = SnapshotIndexShardStage.DONE;
                     break;
                 default:
-                    throw new ElasticsearchIllegalArgumentException("Unknown snapshot state " + shardEntry.getValue().state());
+                    throw new IllegalArgumentException("Unknown snapshot state " + shardEntry.getValue().state());
             }
             SnapshotIndexShardStatus shardStatus = new SnapshotIndexShardStatus(shardEntry.getKey(), stage);
             shardStatusBuilder.add(shardStatus);
@@ -216,7 +215,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeOperation
                 state = SnapshotMetaData.State.SUCCESS;
                 break;
             default:
-                throw new ElasticsearchIllegalArgumentException("Unknown snapshot state " + snapshot.state());
+                throw new IllegalArgumentException("Unknown snapshot state " + snapshot.state());
             }
             builder.add(new SnapshotStatus(snapshotId, state, shardStatusBuilder.build()));
         }
@@ -79,7 +79,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio
     }

     @Override
-    protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener<ClusterStateResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener<ClusterStateResponse> listener) {
         ClusterState currentState = clusterService.state();
         logger.trace("Serving cluster state request using version {}", currentState.version());
         ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName());
@@ -97,7 +97,7 @@ public class TransportClusterStatsAction extends TransportNodesOperationAction<C
     }

     @Override
-    protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) throws ElasticsearchException {
+    protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
         NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, false, true, false, true);
         NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, false, true, false, false, false);
         List<ShardStats> shardsStats = new ArrayList<>();
@@ -62,7 +62,7 @@ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadO
     }

     @Override
-    protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener<PendingClusterTasksResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener<PendingClusterTasksResponse> listener) {
         listener.onResponse(new PendingClusterTasksResponse(clusterService.pendingTasks()));
     }
 }
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.alias;

 import org.elasticsearch.ElasticsearchGenerationException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -187,7 +186,7 @@ public class Alias implements Streamable {
         String currentFieldName = null;
         XContentParser.Token token = parser.nextToken();
         if (token == null) {
-            throw new ElasticsearchIllegalArgumentException("No alias is specified");
+            throw new IllegalArgumentException("No alias is specified");
         }
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
             if (token == XContentParser.Token.FIELD_NAME) {
@@ -77,7 +77,7 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeOperationA
     }

     @Override
-    protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener<IndicesAliasesResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener<IndicesAliasesResponse> listener) {

         //Expand the indices names
         List<AliasActions> actions = request.aliasActions();
@@ -58,7 +58,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadOperatio
     }

     @Override
-    protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<AliasesExistResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<AliasesExistResponse> listener) {
         String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
         boolean result = state.metaData().hasAliases(request.aliases(), concreteIndices);
         listener.onResponse(new AliasesExistResponse(result));
@@ -61,7 +61,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadOperationA
     }

     @Override
-    protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) {
         String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
         @SuppressWarnings("unchecked") // ImmutableList to List results incompatible type
         ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices);
@@ -26,9 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.single.custom.TransportSingleCustomOperationAction;
 import org.elasticsearch.cluster.ClusterService;
@@ -98,7 +96,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
     }

     @Override
-    protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) throws ElasticsearchException {
+    protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) {
         IndexService indexService = null;
         if (shardId != null) {
             indexService = indicesService.indexServiceSafe(shardId.getIndex());
@@ -108,12 +106,12 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
         String field = null;
         if (request.field() != null) {
             if (indexService == null) {
-                throw new ElasticsearchIllegalArgumentException("No index provided, and trying to analyzer based on a specific field which requires the index parameter");
+                throw new IllegalArgumentException("No index provided, and trying to analyzer based on a specific field which requires the index parameter");
             }
             FieldMapper<?> fieldMapper = indexService.mapperService().smartNameFieldMapper(request.field());
             if (fieldMapper != null) {
                 if (fieldMapper.isNumeric()) {
-                    throw new ElasticsearchIllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
+                    throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
                 }
                 analyzer = fieldMapper.indexAnalyzer();
                 field = fieldMapper.names().indexName();
@@ -134,20 +132,20 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
                 analyzer = indexService.analysisService().analyzer(request.analyzer());
             }
             if (analyzer == null) {
-                throw new ElasticsearchIllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
+                throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
             }
         } else if (request.tokenizer() != null) {
             TokenizerFactory tokenizerFactory;
             if (indexService == null) {
                 TokenizerFactoryFactory tokenizerFactoryFactory = indicesAnalysisService.tokenizerFactoryFactory(request.tokenizer());
                 if (tokenizerFactoryFactory == null) {
-                    throw new ElasticsearchIllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]");
+                    throw new IllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]");
                 }
                 tokenizerFactory = tokenizerFactoryFactory.create(request.tokenizer(), DEFAULT_SETTINGS);
             } else {
                 tokenizerFactory = indexService.analysisService().tokenizer(request.tokenizer());
                 if (tokenizerFactory == null) {
-                    throw new ElasticsearchIllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]");
+                    throw new IllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]");
                 }
             }

@@ -159,17 +157,17 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
                 if (indexService == null) {
                     TokenFilterFactoryFactory tokenFilterFactoryFactory = indicesAnalysisService.tokenFilterFactoryFactory(tokenFilterName);
                     if (tokenFilterFactoryFactory == null) {
-                        throw new ElasticsearchIllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]");
+                        throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]");
                     }
                     tokenFilterFactories[i] = tokenFilterFactoryFactory.create(tokenFilterName, DEFAULT_SETTINGS);
                 } else {
                     tokenFilterFactories[i] = indexService.analysisService().tokenFilter(tokenFilterName);
                     if (tokenFilterFactories[i] == null) {
-                        throw new ElasticsearchIllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
+                        throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
                     }
                 }
                 if (tokenFilterFactories[i] == null) {
-                    throw new ElasticsearchIllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
+                    throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
                 }
             }
         }
@@ -182,17 +180,17 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
                 if (indexService == null) {
                     CharFilterFactoryFactory charFilterFactoryFactory = indicesAnalysisService.charFilterFactoryFactory(charFilterName);
                     if (charFilterFactoryFactory == null) {
-                        throw new ElasticsearchIllegalArgumentException("failed to find global char filter under [" + charFilterName + "]");
+                        throw new IllegalArgumentException("failed to find global char filter under [" + charFilterName + "]");
                     }
                     charFilterFactories[i] = charFilterFactoryFactory.create(charFilterName, DEFAULT_SETTINGS);
                 } else {
                     charFilterFactories[i] = indexService.analysisService().charFilter(charFilterName);
                     if (charFilterFactories[i] == null) {
-                        throw new ElasticsearchIllegalArgumentException("failed to find token char under [" + charFilterName + "]");
+                        throw new IllegalArgumentException("failed to find token char under [" + charFilterName + "]");
                     }
                 }
                 if (charFilterFactories[i] == null) {
-                    throw new ElasticsearchIllegalArgumentException("failed to find token char under [" + charFilterName + "]");
+                    throw new IllegalArgumentException("failed to find token char under [" + charFilterName + "]");
                 }
             }
         }
@@ -207,7 +205,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
             }
         }
         if (analyzer == null) {
-            throw new ElasticsearchIllegalArgumentException("failed to find analyzer");
+            throw new IllegalArgumentException("failed to find analyzer");
         }

         List<AnalyzeResponse.AnalyzeToken> tokens = Lists.newArrayList();
@@ -220,7 +218,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
             OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
             TypeAttribute type = stream.addAttribute(TypeAttribute.class);

-            int position = 0;
+            int position = -1;
             while (stream.incrementToken()) {
                 int increment = posIncr.getPositionIncrement();
                 if (increment > 0) {
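Buried among the exception swaps, the last TransportAnalyzeAction hunk changes behavior: token positions now accumulate from -1, so a first token carrying the usual position increment of 1 reports position 0, which is what Lucene actually indexes. A runnable sketch of the accumulation (the increment values are made up):

    public class PositionDemo {
        public static void main(String[] args) {
            int position = -1; // was 0 before the change, leaving the first token off by one
            int[] increments = {1, 1, 2}; // a gap of 2 models a removed stopword
            for (int increment : increments) {
                if (increment > 0) {
                    position += increment;
                }
                System.out.println("token at position " + position); // prints 0, 1, 3
            }
        }
    }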
@@ -97,12 +97,10 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio
     }

     @Override
-    protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request) throws ElasticsearchException {
+    protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request) {
         IndexService service = indicesService.indexService(request.shardId().getIndex());
         if (service != null) {
             IndexShard shard = service.shard(request.shardId().id());
-            // we always clear the query cache
-            service.cache().queryParserCache().clear();
             boolean clearedAtLeastOne = false;
             if (request.filterCache()) {
                 clearedAtLeastOne = true;
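Besides dropping the throws clause, this hunk removes the unconditional query parser cache clear, leaving only the caches the request explicitly asked for. A sketch of the opt-in pattern that remains, with stand-in variables for the request flags:

    public class ClearCacheDemo {
        public static void main(String[] args) {
            boolean filterCache = true; // stand-in for request.filterCache()
            boolean clearedAtLeastOne = false;
            if (filterCache) {
                clearedAtLeastOne = true;
                // clear the filter cache here
            }
            if (!clearedAtLeastOne) {
                // no specific cache was requested; the real action may fall back
                // to clearing everything, which this sketch only notes
                System.out.println("no cache selected");
            }
        }
    }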
@@ -75,7 +75,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeOperationActio
     }

     @Override
-    protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) {
         final String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
         CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest()
                 .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
@@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.create;
 import com.google.common.base.Charsets;
 import com.google.common.collect.Sets;
 import org.elasticsearch.ElasticsearchGenerationException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
@@ -239,7 +238,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
         try {
             mappings.put(type, source.string());
         } catch (IOException e) {
-            throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e);
+            throw new IllegalArgumentException("Failed to build json for mapping request", e);
         }
         return this;
     }
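The catch block above is a pattern that recurs through these request builders: a checked IOException raised while serializing the mapping source is rethrown as an unchecked IllegalArgumentException with the original attached as the cause, so the fluent setter keeps a clean signature while the root failure stays visible in the stack trace. A standalone sketch of the shape (names hypothetical):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class MappingHolder {
    private final Map<String, String> mappings = new HashMap<>();

    // Fluent setter: serialization failures surface as IllegalArgumentException
    // carrying the IOException as cause, so callers can still chain calls.
    public MappingHolder mapping(String type, MappingSource source) {
        try {
            mappings.put(type, source.string());
            return this;
        } catch (IOException e) {
            throw new IllegalArgumentException("Failed to build json for mapping request", e);
        }
    }

    // Stand-in for the XContent source used by the real request.
    public interface MappingSource {
        String string() throws IOException;
    }
}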
@@ -66,7 +66,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeOperationActi
     }

     @Override
-    protected void masterOperation(final CreateIndexRequest request, final ClusterState state, final ActionListener<CreateIndexResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final CreateIndexRequest request, final ClusterState state, final ActionListener<CreateIndexResponse> listener) {
         String cause = request.cause();
         if (cause.length() == 0) {
             cause = "api";
@@ -75,7 +75,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeOperationActi
     }

     @Override
-    protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) {
         String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
         if (concreteIndices.length == 0) {
             listener.onResponse(new DeleteIndexResponse(true));
@@ -64,7 +64,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadOperati
     }

     @Override
-    protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, final ActionListener<IndicesExistsResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, final ActionListener<IndicesExistsResponse> listener) {
         boolean exists;
         try {
             // Similar as the previous behaviour, but now also aliases and wildcards are supported.
@@ -61,7 +61,7 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadOperation
     }

     @Override
-    protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener<TypesExistsResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener<TypesExistsResponse> listener) {
         String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
         if (concreteIndices.length == 0) {
             listener.onResponse(new TypesExistsResponse(false));
@@ -90,7 +90,7 @@ public class TransportFlushAction extends TransportBroadcastOperationAction<Flus
     }

     @Override
-    protected ShardFlushResponse shardOperation(ShardFlushRequest request) throws ElasticsearchException {
+    protected ShardFlushResponse shardOperation(ShardFlushRequest request) {
         IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
         indexShard.flush(request.getRequest());
         return new ShardFlushResponse(request.shardId());
@@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.indices.get;

 import com.google.common.collect.ObjectArrays;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -74,18 +73,18 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
             return this.validNames.contains(name);
         }

-        public static Feature fromName(String name) throws ElasticsearchIllegalArgumentException {
+        public static Feature fromName(String name) {
             for (Feature feature : Feature.values()) {
                 if (feature.validName(name)) {
                     return feature;
                 }
             }
-            throw new ElasticsearchIllegalArgumentException("No feature for name [" + name + "]");
+            throw new IllegalArgumentException("No feature for name [" + name + "]");
         }

-        public static Feature fromId(byte id) throws ElasticsearchIllegalArgumentException {
+        public static Feature fromId(byte id) {
             if (id < 0 || id >= FEATURES.length) {
-                throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]");
+                throw new IllegalArgumentException("No mapping for id [" + id + "]");
             }
             return FEATURES[id];
         }
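`fromName` and `fromId` are the standard pair of enum lookup helpers: a linear scan over accepted names and a bounds-checked array index for the wire id, with IllegalArgumentException as the failure signal. Note that the old signatures even declared `throws` on an unchecked exception, which the compiler never enforced. A self-contained sketch of the pattern (constant and name choices are illustrative):

public enum Feature {
    ALIASES("_aliases"), MAPPINGS("_mappings"), SETTINGS("_settings");

    private static final Feature[] FEATURES = values();   // cache: values() clones per call
    private final String preferredName;

    Feature(String preferredName) { this.preferredName = preferredName; }

    public static Feature fromName(String name) {
        for (Feature feature : FEATURES) {
            if (feature.preferredName.equals(name)) {
                return feature;
            }
        }
        throw new IllegalArgumentException("No feature for name [" + name + "]");
    }

    public static Feature fromId(byte id) {
        if (id < 0 || id >= FEATURES.length) {             // ordinal doubles as the wire id
            throw new IllegalArgumentException("No feature for id [" + id + "]");
        }
        return FEATURES[id];
    }
}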
@@ -104,7 +103,7 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {

     public GetIndexRequest features(Feature... features) {
         if (features == null) {
-            throw new ElasticsearchIllegalArgumentException("features cannot be null");
+            throw new IllegalArgumentException("features cannot be null");
         } else {
             this.features = features;
         }
@@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.get;
 import com.google.common.collect.ImmutableList;

 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
 import org.elasticsearch.action.support.ActionFilters;
@@ -70,7 +69,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex

     @Override
     protected void doMasterOperation(final GetIndexRequest request, String[] concreteIndices, final ClusterState state,
-                                     final ActionListener<GetIndexResponse> listener) throws ElasticsearchException {
+                                     final ActionListener<GetIndexResponse> listener) {
         ImmutableOpenMap<String, ImmutableList<Entry>> warmersResult = ImmutableOpenMap.of();
         ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsResult = ImmutableOpenMap.of();
         ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliasesResult = ImmutableOpenMap.of();
@@ -112,7 +111,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
                     break;

                 default:
-                    throw new ElasticsearchIllegalStateException("feature [" + feature + "] is not valid");
+                    throw new IllegalStateException("feature [" + feature + "] is not valid");
             }
         }
         listener.onResponse(new GetIndexResponse(concreteIndices, warmersResult, mappingsResult, aliasesResult, settings));
@@ -87,7 +87,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomO
     }

     @Override
-    protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) throws ElasticsearchException {
+    protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) {
         assert shardId != null;
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
         Collection<String> typeIntersection;
@@ -173,7 +173,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomO
         }
     };

-    private ImmutableMap<String, FieldMappingMetaData> findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) throws ElasticsearchException {
+    private ImmutableMap<String, FieldMappingMetaData> findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) {
         MapBuilder<String, FieldMappingMetaData> fieldMappings = new MapBuilder<>();
         final DocumentFieldMappers allFieldMappers = documentMapper.mappers();
         for (String field : request.fields()) {
@@ -60,7 +60,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa
     }

     @Override
-    protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetMappingsResponse> listener) throws ElasticsearchException {
+    protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetMappingsResponse> listener) {
         logger.trace("serving getMapping request based on version {}", state.version());
         ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result = state.metaData().findMappings(
                 concreteIndices, request.types()
@@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.indices.mapping.put;

 import com.carrotsearch.hppc.ObjectOpenHashSet;
 import org.elasticsearch.ElasticsearchGenerationException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -170,7 +169,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
             for (String s : s1) {
                 String[] s2 = Strings.split(s, "=");
                 if (s2.length != 2) {
-                    throw new ElasticsearchIllegalArgumentException("malformed " + s);
+                    throw new IllegalArgumentException("malformed " + s);
                 }
                 builder.field(s2[0], s2[1]);
             }
@@ -190,7 +189,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
             for (String s : s1) {
                 String[] s2 = Strings.split(s, "=");
                 if (s2.length != 2) {
-                    throw new ElasticsearchIllegalArgumentException("malformed " + s);
+                    throw new IllegalArgumentException("malformed " + s);
                 }
                 builder.field(s2[0], s2[1]);
             }
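Both hunks above guard the same micro-parser: the simplified mapping syntax takes strings of `key=value` attributes, splits each on `=`, and rejects anything that does not produce exactly two parts before writing the pair into the mapping source. A standalone sketch of that validation, with a plain Map standing in for the XContentBuilder (method names hypothetical):

import java.util.LinkedHashMap;
import java.util.Map;

public class SimpleDefParser {
    // Parse "key=value" attribute pairs the way the simplified mapping
    // definition does; a plain Map stands in for the XContentBuilder fields.
    static Map<String, String> parse(String... pairs) {
        Map<String, String> fields = new LinkedHashMap<>();
        for (String s : pairs) {
            String[] s2 = s.split("=", -1);            // "=" has no regex metacharacters
            if (s2.length != 2) {
                throw new IllegalArgumentException("malformed " + s);
            }
            fields.put(s2[0], s2[1]);
        }
        return fields;
    }

    public static void main(String[] args) {
        System.out.println(parse("type=string", "index=analyzed")); // {type=string, index=analyzed}
        try {
            parse("analyzed");                                      // no '=': rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());                     // malformed analyzed
        }
    }
}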
@@ -203,7 +202,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
             builder.endObject();
             return builder;
         } catch (Exception e) {
-            throw new ElasticsearchIllegalArgumentException("failed to generate simplified mapping definition", e);
+            throw new IllegalArgumentException("failed to generate simplified mapping definition", e);
         }
     }

@@ -214,7 +213,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
         try {
             return source(mappingBuilder.string());
         } catch (IOException e) {
-            throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e);
+            throw new IllegalArgumentException("Failed to build json for mapping request", e);
         }
     }

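For context on the two hunks above: the simplified mapping definition is assembled with an XContentBuilder, serialized with `mappingBuilder.string()`, and handed to `source(...)`, with any builder failure reported as a bad argument rather than a checked exception. A sketch of that assembly against the 1.x-era XContentBuilder API, in which `string()` still existed (the type and field names are invented for the example):

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class MappingSourceDemo {
    public static void main(String[] args) {
        try {
            // Build a tiny mapping body, then serialize it to the JSON string
            // that would be passed to source(...).
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject()
                        .startObject("tweet")
                            .startObject("properties")
                                .startObject("message").field("type", "string").endObject()
                            .endObject()
                        .endObject()
                   .endObject();
            System.out.println(builder.string());
        } catch (Exception e) {
            // Same translation as in the diff: builder failures become bad arguments.
            throw new IllegalArgumentException("failed to generate simplified mapping definition", e);
        }
    }
}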
@@ -65,7 +65,7 @@ public class TransportPutMappingAction extends TransportMasterNodeOperationActio
     }

     @Override
-    protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener<PutMappingResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener<PutMappingResponse> listener) {
         final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices());
         PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
                 .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
@@ -75,7 +75,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeOperationAction
     }

     @Override
-    protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener<OpenIndexResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener<OpenIndexResponse> listener) {
         final String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
         OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest()
                 .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
@@ -91,7 +91,7 @@ public class TransportOptimizeAction extends TransportBroadcastOperationAction<O
     }

     @Override
-    protected ShardOptimizeResponse shardOperation(ShardOptimizeRequest request) throws ElasticsearchException {
+    protected ShardOptimizeResponse shardOperation(ShardOptimizeRequest request) {
         IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
         indexShard.optimize(request.optimizeRequest());
         return new ShardOptimizeResponse(request.shardId());
@@ -124,7 +124,7 @@ public class TransportRecoveryAction extends TransportBroadcastOperationAction<R
     }

     @Override
-    protected ShardRecoveryResponse shardOperation(ShardRecoveryRequest request) throws ElasticsearchException {
+    protected ShardRecoveryResponse shardOperation(ShardRecoveryRequest request) {

         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
         IndexShard indexShard = indexService.shardSafe(request.shardId().id());
@@ -91,7 +91,7 @@ public class TransportRefreshAction extends TransportBroadcastOperationAction<Re
     }

     @Override
-    protected ShardRefreshResponse shardOperation(ShardRefreshRequest request) throws ElasticsearchException {
+    protected ShardRefreshResponse shardOperation(ShardRefreshRequest request) {
         IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed", indexShard.shardId());
@@ -117,7 +117,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastOperationA
     }

     @Override
-    protected ShardSegments shardOperation(IndexShardSegmentRequest request) throws ElasticsearchException {
+    protected ShardSegments shardOperation(IndexShardSegmentRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
         IndexShard indexShard = indexService.shardSafe(request.shardId().id());
         return new ShardSegments(indexShard.routingEntry(), indexShard.engine().segments(request.verbose));
@@ -72,7 +72,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadOperation
     }

     @Override
-    protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener<GetSettingsResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener<GetSettingsResponse> listener) {
         String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
         ImmutableOpenMap.Builder<String, Settings> indexToSettingsBuilder = ImmutableOpenMap.builder();
         for (String concreteIndex : concreteIndices) {
@@ -74,7 +74,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeOperationA
     }

     @Override
-    protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<UpdateSettingsResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<UpdateSettingsResponse> listener) {
         final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices());
         UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest()
                 .indices(concreteIndices)
@@ -119,7 +119,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastOperationActi
     }

     @Override
-    protected ShardStats shardOperation(IndexShardStatsRequest request) throws ElasticsearchException {
+    protected ShardStats shardOperation(IndexShardStatsRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
         IndexShard indexShard = indexService.shardSafe(request.shardId().id());
         // if we don't have the routing entry yet, we need it stats wise, we treat it as if the shard is not ready yet
@@ -63,7 +63,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeOpera
     }

     @Override
-    protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener<DeleteIndexTemplateResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener<DeleteIndexTemplateResponse> listener) {
         indexTemplateService.removeTemplates(new MetaDataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.masterNodeTimeout()), new MetaDataIndexTemplateService.RemoveListener() {
             @Override
             public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) {
@@ -63,7 +63,7 @@ public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadOpe
     }

     @Override
-    protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener<GetIndexTemplatesResponse> listener) throws ElasticsearchException {
+    protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener<GetIndexTemplatesResponse> listener) {
         List<IndexTemplateMetaData> results;

         // If we did not ask for a specific name, then we return all templates
Some files were not shown because too many files have changed in this diff.