Merge branch 'master' into die_cwd_die

Robert Muir 2015-04-29 09:15:18 -04:00
commit 6bd69b74f1
755 changed files with 3473 additions and 8002 deletions

View File

@@ -34,6 +34,10 @@ h2. Getting Started
First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasticsearch is all about.
h3. Requirements
You need to have a recent version of Java installed. See the "Setup":http://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html#jvm-version page for more information.
h3. Installation
* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.

View File

@@ -513,7 +513,7 @@ def publish_repositories(version, dry_run=True):
else:
print('Triggering repository update - calling dev-tools/build_repositories.sh %s' % version)
# src_branch is a version like 1.5/1.6/2.0/etc., so we can use this
run('dev-tools/build_repositories.sh %s', src_branch)
run('dev-tools/build_repositories.sh %s' % src_branch)
def print_sonatype_notice():
settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml')
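
The fix above interpolates `src_branch` into the command before calling `run()`; in the old form the literal `%s` stayed in the command string and `src_branch` was passed as a stray extra argument. A minimal standalone sketch of the difference (version string hypothetical):

--------------------------------------------------
src_branch = '1.5'  # hypothetical version branch
cmd = 'dev-tools/build_repositories.sh %s'

# old form: the placeholder is never filled in
print(cmd)               # dev-tools/build_repositories.sh %s

# new form: '%' interpolates the version before the command runs
print(cmd % src_branch)  # dev-tools/build_repositories.sh 1.5
--------------------------------------------------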

View File

@@ -32,12 +32,12 @@ set -e
## GPG_KEY_ID: Key id of your GPG key
## AWS_ACCESS_KEY_ID: AWS access key id
## AWS_SECRET_ACCESS_KEY: AWS secret access key
## S3_BUCKET_SYNC_TO Bucket to write packages to, defaults to packages.elasticsearch.org/elasticsearch
## S3_BUCKET_SYNC_TO Bucket to write packages to, should be set to packages.elasticsearch.org for a regular release
##
##
## optional
##
## S3_BUCKET_SYNC_FROM Bucket to read packages from, defaults to packages.elasticsearch.org/elasticsearch
## S3_BUCKET_SYNC_FROM Bucket to read packages from, defaults to packages.elasticsearch.org
## KEEP_DIRECTORIES Allows keeping all the generated directory structures for debugging
## GPG_KEYRING Configure GPG keyring home, defaults to ~/.gnupg/
##
@@ -51,7 +51,7 @@ set -e
# No trailing slashes!
if [ -z $S3_BUCKET_SYNC_FROM ] ; then
S3_BUCKET_SYNC_FROM="packages.elasticsearch.org/elasticsearch"
S3_BUCKET_SYNC_FROM="packages.elasticsearch.org"
fi
if [ ! -z $GPG_KEYRING ] ; then
GPG_HOMEDIR="--homedir ${GPG_KEYRING}"
@@ -156,7 +156,7 @@ centosdir=$tempdir/repository/elasticsearch/$version/centos
mkdir -p $centosdir
echo "RPM: Syncing repository for version $version into $centosdir"
$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/$version/centos/ $centosdir
$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/elasticsearch/$version/centos/ $centosdir
rpm=target/rpm/elasticsearch/RPMS/noarch/elasticsearch*.rpm
echo "RPM: Copying $rpm into $centosdor"
@@ -191,7 +191,7 @@ mkdir -p $debbasedir
echo "DEB: Syncing debian repository of version $version to $debbasedir"
# sync all former versions into directory
$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/$version/debian/ $debbasedir
$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/elasticsearch/$version/debian/ $debbasedir
# create directories for a new release, in case syncing did not create this structure
mkdir -p $debbasedir/dists/stable/main/binary-all

View File

@@ -20,22 +20,3 @@ example:
}
}
--------------------------------------------------
[float]
[[include-exclude]]
==== Includes / Excludes
Allow to specify paths in the source that would be included / excluded
when it's stored, supporting `*` as wildcard annotation. For example:
[source,js]
--------------------------------------------------
{
"my_type" : {
"_source" : {
"includes" : ["path1.*", "path2.*"],
"excludes" : ["path3.*"]
}
}
}
--------------------------------------------------

View File

@@ -67,8 +67,3 @@ the fact that the following JSON document is perfectly fine:
}
--------------------------------------------------
Note also, that thanks to the fact that we used the `index_name` to use
the non plural form (`tag` instead of `tags`), we can actually refer to
the field using the `index_name` as well. For example, we can execute a
query using `tweet.tags:wow` or `tweet.tag:wow`. We could, of course,
name the field as `tag` and skip the `index_name` all together).

View File

@@ -270,7 +270,7 @@ to provide special features. They now have limited configuration options.
* `_field_names` configuration is limited to disabling the field.
* `_size` configuration is limited to enabling the field.
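
For example, the remaining configuration for these two fields looks as follows (type name hypothetical; a sketch, not part of this commit):

[source,js]
--------------------------------------------------
{
  "my_type": {
    "_field_names": { "enabled": false },
    "_size": { "enabled": true }
  }
}
--------------------------------------------------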
=== Boolean fields
==== Boolean fields
Boolean fields used to have a string fielddata with `F` meaning `false` and `T`
meaning `true`. They have been refactored to use numeric fielddata, with `0`
@@ -302,10 +302,14 @@ the user-friendly representation of boolean fields: `false`/`true`:
]
---------------
=== Murmur3 Fields
==== Murmur3 Fields
Fields of type `murmur3` can no longer change `doc_values` or `index` setting.
They are always stored with doc values, and not indexed.
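
A `murmur3` field is therefore declared without `doc_values` or `index` options, e.g. (field name hypothetical):

[source,js]
--------------------------------------------------
{
  "my_type": {
    "properties": {
      "visitor_hash": { "type": "murmur3" }
    }
  }
}
--------------------------------------------------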
==== Source field configuration
The `_source` field no longer supports `includes` and `excludes` parameters. When
`_source` is enabled, the entire original source will be stored.
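
If only part of the source is needed at retrieval time, one alternative (an assumption, not covered by this commit) is source filtering in the search request:

[source,js]
--------------------------------------------------
{
  "_source": {
    "include": [ "path1.*" ],
    "exclude": [ "path3.*" ]
  },
  "query": { "match_all": {} }
}
--------------------------------------------------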
=== Codecs
It is no longer possible to specify per-field postings and doc values formats
@@ -345,6 +349,11 @@ Deprecated script parameters `id`, `file`, and `scriptField` have been removed
from all scriptable APIs. `script_id`, `script_file` and `script` should be used
in their place.
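
For example, a `script_fields` entry now uses `script` for an inline script (field name and script hypothetical):

[source,js]
--------------------------------------------------
{
  "script_fields": {
    "doubled_amount": {
      "script": "doc['amount'].value * 2"
    }
  }
}
--------------------------------------------------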
=== Groovy scripts sandbox
The Groovy sandbox and related settings have been removed. Groovy is now a non-
sandboxed scripting language, without any option to turn the sandbox on.
=== Plugins making use of scripts
Plugins that make use of scripts must register their own script context through
@@ -410,3 +419,26 @@ a single `expand_wildcards` parameter. See <<multi-index,the multi-index docs>>
The `_shutdown` API has been removed without a replacement. Nodes should be managed via operating
systems and the provided start/stop scripts.
=== Analyze API
The Analyze API returns 0 as the first token's position instead of 1.
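
For example, a sketch of an analyze call:

[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_analyze?analyzer=standard&text=quick+brown+fox'
--------------------------------------------------

The first token, `quick`, is now returned with `"position": 0` rather than `1`.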
=== Multiple data.path striping
Previously, if the `data.path` setting listed multiple data paths, then a
shard would be ``striped'' across all paths by writing a whole file to each
path in turn (in accordance with the `index.store.distributor` setting). The
result was that the files from a single segment in a shard could be spread
across multiple disks, and the failure of any one disk could corrupt multiple
shards.
This striping is no longer supported. Instead, different shards may be
allocated to different paths, but all of the files in a single shard will be
written to the same path.
If striping is detected while starting Elasticsearch 2.0.0 or later, all of
the files belonging to the same shard will be migrated to the same path. If
there is not enough disk space to complete this migration, the upgrade will be
cancelled and can only be resumed once enough disk space is made available.
The `index.store.distributor` setting has also been removed.

View File

@@ -227,7 +227,7 @@ several attributes, for example:
[source,js]
--------------------------------------------------
curl -XPUT localhost:9200/test/_settings -d '{
"index.routing.allocation.include.group1" : "xxx"
"index.routing.allocation.include.group1" : "xxx",
"index.routing.allocation.include.group2" : "yyy",
"index.routing.allocation.exclude.group3" : "zzz",
"index.routing.allocation.require.group4" : "aaa"

View File

@@ -11,26 +11,11 @@ The scripting module uses by default http://groovy.codehaus.org/[groovy]
scripting language with some extensions. Groovy is used since it is extremely
fast and very simple to use.
.Groovy dynamic scripting disabled by default from v1.4.3
.Groovy dynamic scripting off by default from v1.4.3
[IMPORTANT]
===================================================
Elasticsearch versions 1.3.0-1.3.7 and 1.4.0-1.4.2 have a vulnerability in the
Groovy scripting engine. The vulnerability allows an attacker to construct
Groovy scripts that escape the sandbox and execute shell commands as the user
running the Elasticsearch Java VM.
If you are running a vulnerable version of Elasticsearch, you should either
upgrade to at least v1.3.8 or v1.4.3, or disable dynamic Groovy scripts by
adding this setting to the `config/elasticsearch.yml` file in all nodes in the
cluster:
[source,yaml]
-----------------------------------
script.groovy.sandbox.enabled: false
-----------------------------------
This will turn off the Groovy sandbox, thus preventing dynamic Groovy scripts
Groovy dynamic scripting is off by default, preventing dynamic Groovy scripts
from being accepted as part of a request or retrieved from the special
`.scripts` index. You will still be able to use Groovy scripts stored in files
in the `config/scripts/` directory on every node.
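
For example (script name hypothetical), a script saved as `config/scripts/my_score.groovy` on each node can still be referenced by name via `script_file`:

[source,js]
--------------------------------------------------
{
  "script_fields": {
    "my_score": {
      "script_file": "my_score"
    }
  }
}
--------------------------------------------------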
@@ -351,39 +336,6 @@ The default scripting language (assuming no `lang` parameter is provided) is
`groovy`. In order to change it, set the `script.default_lang` to the
appropriate language.
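
For example, to default to Lucene expressions (assuming the `expression` language is available in your build), add the following to `config/elasticsearch.yml`:

[source,yaml]
-----------------------------------
script.default_lang: expression
-----------------------------------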
[float]
=== Groovy Sandboxing
Elasticsearch sandboxes Groovy scripts that are compiled and executed in order
to ensure they don't perform unwanted actions. There are a number of options
that can be used for configuring this sandbox:
`script.groovy.sandbox.receiver_whitelist`::
Comma-separated list of string classes for objects that may have methods
invoked.
`script.groovy.sandbox.package_whitelist`::
Comma-separated list of packages under which new objects may be constructed.
`script.groovy.sandbox.class_whitelist`::
Comma-separated list of classes that are allowed to be constructed.
`script.groovy.sandbox.method_blacklist`::
Comma-separated list of methods that are never allowed to be invoked,
regardless of target object.
`script.groovy.sandbox.enabled`::
Flag to enable the sandbox (defaults to `false` meaning the sandbox is
disabled).
When specifying whitelist or blacklist settings for the groovy sandbox, all
options replace the current whitelist, they are not additive.
[float]
=== Automatic Script Reloading

View File

@@ -175,7 +175,8 @@ doing so would look like:
"field_value_factor": {
"field": "popularity",
"factor": 1.2,
"modifier": "sqrt"
"modifier": "sqrt",
"missing": 1
}
--------------------------------------------------
@@ -193,6 +194,8 @@ There are a number of options for the `field_value_factor` function:
|`modifier` |Modifier to apply to the field value, can be one of: `none`, `log`,
`log1p`, `log2p`, `ln`, `ln1p`, `ln2p`, `square`, `sqrt`, or `reciprocal`.
Defaults to `none`.
|`missing` |Value used if the document doesn't have that field. The modifier
and factor are still applied to it as though it were read from the document.
|=======================================================================
Keep in mind that taking the log() of 0, or the square root of a negative number

View File

@@ -84,24 +84,28 @@ $ curl -XPUT 'http://localhost:9200/transactions/stock/1' -d '
"type": "sale",
"amount": 80
}
'
$ curl -XPUT 'http://localhost:9200/transactions/stock/2' -d '
{
"type": "cost",
"amount": 10
}
'
$ curl -XPUT 'http://localhost:9200/transactions/stock/3' -d '
{
"type": "cost",
"amount": 30
}
'
$ curl -XPUT 'http://localhost:9200/transactions/stock/4' -d '
{
"type": "sale",
"amount": 130
}
'
--------------------------------------------------
Let's say that documents 1 and 3 end up on shard A and documents 2 and 4 end up on shard B. The following is a breakdown of what the aggregation result is

View File

@@ -18,30 +18,22 @@ on the node. Can hold multiple locations. | {path.home}/data| path.data
| plugins | Plugin files location. Each plugin will be contained in a subdirectory. | {path.home}/plugins | path.plugins
|=======================================================================
The multiple data locations allows to stripe it. The striping is simple,
placing whole files in one of the locations, and deciding where to place
the file based on the value of the `index.store.distributor` setting:
Multiple `data` paths may be specified, in order to spread data across
multiple disks or locations, but all of the files from a single shard will be
written to the same path. This can be configured as follows:
* `least_used` (default) always selects the directory with the most
available space +
* `random` selects directories at random. The probability of selecting
a particular directory is proportional to amount of available space in
this directory.
---------------------------------
path.data: /mnt/first,/mnt/second
---------------------------------
Note, there are no multiple copies of the same data, in that, its
similar to RAID 0. Though simple, it should provide a good solution for
people that don't want to mess with RAID. Here is how it is configured:
Or in an array format:
---------------------------------
path.data: /mnt/first,/mnt/second
---------------------------------
Or the in an array format:
----------------------------------------
path.data: ["/mnt/first", "/mnt/second"]
----------------------------------------
----------------------------------------
path.data: ["/mnt/first", "/mnt/second"]
----------------------------------------
TIP: To stripe shards across multiple disks, please use a RAID driver
instead.
[float]
[[default-paths]]

View File

@@ -73,7 +73,7 @@
<repository>
<id>lucene-snapshots</id>
<name>Lucene Snapshots</name>
<url>https://download.elastic.co/lucenesnapshots/${lucene.snapshot.revision}</url>
<url>http://download.elastic.co/lucenesnapshots/${lucene.snapshot.revision}</url>
</repository>
</repositories>

View File

@@ -41,10 +41,6 @@
"routing": {
"type" : "string",
"description" : "Specific routing value"
},
"source": {
"type" : "string",
"description" : "The URL-encoded query definition (instead of using the request body)"
}
}
},

View File

@@ -23,10 +23,6 @@
}
},
"params": {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"routing": {
"type": "list",
"description": "A comma-separated list of specific routing values"

View File

@@ -1,75 +0,0 @@
{
"delete_by_query": {
"documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html",
"methods": ["DELETE"],
"url": {
"path": "/{index}/_query",
"paths": ["/{index}/_query", "/{index}/{type}/_query"],
"parts": {
"index": {
"type" : "list",
"required": true,
"description" : "A comma-separated list of indices to restrict the operation; use `_all` to perform the operation on all indices"
},
"type": {
"type" : "list",
"description" : "A comma-separated list of types to restrict the operation"
}
},
"params": {
"analyzer": {
"type" : "string",
"description" : "The analyzer to use for the query string"
},
"consistency": {
"type" : "enum",
"options" : ["one", "quorum", "all"],
"description" : "Specific write consistency setting for the operation"
},
"default_operator": {
"type" : "enum",
"options" : ["AND","OR"],
"default" : "OR",
"description" : "The default operator for query string query (AND or OR)"
},
"df": {
"type" : "string",
"description" : "The field to use as default where no field prefix is given in the query string"
},
"ignore_unavailable": {
"type" : "boolean",
"description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
},
"allow_no_indices": {
"type" : "boolean",
"description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
},
"expand_wildcards": {
"type" : "enum",
"options" : ["open","closed","none","all"],
"default" : "open",
"description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
},
"q": {
"type" : "string",
"description" : "Query in the Lucene query string syntax"
},
"routing": {
"type" : "string",
"description" : "Specific routing value"
},
"source": {
"type" : "string",
"description" : "The URL-encoded query definition (instead of using the request body)"
},
"timeout": {
"type" : "time",
"description" : "Explicit operation timeout"
}
}
},
"body": {
"description" : "A query to restrict the operation specified with the Query DSL"
}
}
}

View File

@@ -69,10 +69,6 @@
"type" : "string",
"description" : "Specific routing value"
},
"source": {
"type" : "string",
"description" : "The URL-encoded query definition (instead of using the request body)"
},
"_source": {
"type" : "list",
"description" : "True or false to return the _source field or not, or a list of fields to return"

View File

@@ -12,10 +12,6 @@
}
},
"params": {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"analyzer": {
"type" : "string",
"description" : "The name of the analyzer to use"

View File

@@ -37,10 +37,6 @@
"operation_threading": {
"description" : "TODO: ?"
},
"source": {
"type" : "string",
"description" : "The URL-encoded query definition (instead of using the request body)"
},
"q": {
"type" : "string",
"description" : "Query in the Lucene query string syntax"

View File

@@ -16,10 +16,6 @@
}
},
"params": {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"fields": {
"type": "list",
"description" : "A comma-separated list of fields to return in the response"

View File

@@ -23,10 +23,6 @@
}
},
"params": {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"boost_terms": {
"type" : "number",
"description" : "The boost factor"

View File

@@ -16,10 +16,6 @@
}
},
"params": {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"ignore_unavailable": {
"type": "boolean",
"description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)"

View File

@@ -16,10 +16,6 @@
}
},
"params": {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"search_type": {
"type" : "enum",
"options" : ["query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", "dfs_query_and_fetch", "count", "scan"],

View File

@@ -16,10 +16,6 @@
}
},
"params" : {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"ids" : {
"type" : "list",
"description" : "A comma-separated list of documents ids. You must define ids as parameter or set \"ids\" or \"docs\" in the request body",

View File

@@ -23,10 +23,6 @@
}
},
"params": {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"routing": {
"type" : "list",
"description" : "A comma-separated list of specific routing values"

View File

@@ -12,10 +12,6 @@
}
},
"params": {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"scroll": {
"type" : "duration",
"description" : "Specify how long a consistent view of the index should be maintained for scrolled search"

View File

@@ -101,10 +101,6 @@
"type" : "list",
"description" : "A comma-separated list of <field>:<direction> pairs"
},
"source": {
"type" : "string",
"description" : "The URL-encoded request definition using the Query DSL (instead of using request body)"
},
"_source": {
"type" : "list",
"description" : "True or false to return the _source field or not, or a list of fields to return"

View File

@@ -41,10 +41,6 @@
"routing": {
"type" : "string",
"description" : "Specific routing value"
},
"source": {
"type" : "string",
"description" : "The URL-encoded query definition (instead of using the request body)"
}
}
},

View File

@@ -16,10 +16,6 @@
}
},
"params" : {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"ignore_unavailable": {
"type" : "boolean",
"description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"

View File

@@ -33,10 +33,6 @@
"routing": {
"type" : "string",
"description" : "Specific routing value"
},
"source": {
"type" : "string",
"description" : "The URL-encoded request definition (instead of using request body)"
}
}
},

View File

@@ -22,10 +22,6 @@
}
},
"params": {
"source": {
"type" : "string",
"description" : "The URL-encoded request definition"
},
"term_statistics" : {
"type" : "boolean",
"description" : "Specifies if total term frequency and document frequency should be returned.",

View File

@@ -1,42 +0,0 @@
---
"Basic delete_by_query":
- do:
index:
index: test_1
type: test
id: 1
body: { foo: bar }
- do:
index:
index: test_1
type: test
id: 2
body: { foo: baz }
- do:
index:
index: test_1
type: test
id: 3
body: { foo: foo }
- do:
indices.refresh: {}
- do:
delete_by_query:
index: test_1
body:
query:
match:
foo: bar
- do:
indices.refresh: {}
- do:
count:
index: test_1
- match: { count: 2 }

View File

@@ -21,7 +21,6 @@ package org.apache.lucene.analysis;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import java.io.IOException;
import java.util.Collections;
@@ -97,7 +96,7 @@ public class PrefixAnalyzer extends Analyzer {
this.currentPrefix = null;
this.separator = separator;
if (prefixes == null || !prefixes.iterator().hasNext()) {
throw new ElasticsearchIllegalArgumentException("one or more prefixes needed");
throw new IllegalArgumentException("one or more prefixes needed");
}
}

View File

@@ -19,7 +19,6 @@
package org.apache.lucene.store;
import org.apache.lucene.store.RateLimiter.SimpleRateLimiter;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -42,7 +41,7 @@ public class StoreRateLimiting {
MERGE,
ALL;
public static Type fromString(String type) throws ElasticsearchIllegalArgumentException {
public static Type fromString(String type) {
if ("none".equalsIgnoreCase(type)) {
return NONE;
} else if ("merge".equalsIgnoreCase(type)) {
@@ -50,7 +49,7 @@ public class StoreRateLimiting {
} else if ("all".equalsIgnoreCase(type)) {
return ALL;
}
throw new ElasticsearchIllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]");
throw new IllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]");
}
}
@@ -88,7 +87,7 @@ public class StoreRateLimiting {
this.type = type;
}
public void setType(String type) throws ElasticsearchIllegalArgumentException {
public void setType(String type) {
this.type = Type.fromString(type);
}
}
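
For callers, the visible effect of this migration is that the plain JDK exception types are thrown in place of the removed Elasticsearch-specific ones; a minimal sketch (usage hypothetical):

[source,java]
--------------------------------------------------
try {
    StoreRateLimiting.Type.fromString("bogus");
} catch (IllegalArgumentException e) {
    // before this commit: ElasticsearchIllegalArgumentException
}
--------------------------------------------------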

View File

@@ -194,7 +194,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
if (this instanceof ElasticsearchWrapperException) {
toXContent(builder, params, this);
} else {
builder.field("type", getExceptionName(this));
builder.field("type", getExceptionName());
builder.field("reason", getMessage());
innerToXContent(builder, params);
}
@@ -261,7 +261,16 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
if (ex instanceof ElasticsearchException) {
return ((ElasticsearchException) ex).guessRootCauses();
}
return new ElasticsearchException[0];
return new ElasticsearchException[] {new ElasticsearchException(t.getMessage(), t) {
@Override
protected String getExceptionName() {
return getExceptionName(getCause());
}
}};
}
protected String getExceptionName() {
return getExceptionName(this);
}
/**

View File

@@ -1,45 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch;
import org.elasticsearch.rest.RestStatus;
/**
*
*/
public class ElasticsearchIllegalArgumentException extends ElasticsearchException {
public ElasticsearchIllegalArgumentException() {
super(null);
}
public ElasticsearchIllegalArgumentException(String msg) {
super(msg);
}
public ElasticsearchIllegalArgumentException(String msg, Throwable cause) {
super(msg, cause);
}
@Override
public RestStatus status() {
return RestStatus.BAD_REQUEST;
}
}

View File

@@ -1,38 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch;
/**
*
*/
public class ElasticsearchIllegalStateException extends ElasticsearchException {
public ElasticsearchIllegalStateException() {
super(null);
}
public ElasticsearchIllegalStateException(String msg) {
super(msg);
}
public ElasticsearchIllegalStateException(String msg, Throwable cause) {
super(msg, cause);
}
}

View File

@@ -459,12 +459,12 @@ public class Version {
/**
* Return the {@link Version} of Elasticsearch that has been used to create an index given its settings.
*
* @throws ElasticsearchIllegalStateException if the given index settings doesn't contain a value for the key {@value IndexMetaData#SETTING_VERSION_CREATED}
* @throws IllegalStateException if the given index settings doesn't contain a value for the key {@value IndexMetaData#SETTING_VERSION_CREATED}
*/
public static Version indexCreated(Settings indexSettings) {
final Version indexVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null);
if (indexVersion == null) {
throw new ElasticsearchIllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_UUID) + "]");
throw new IllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_UUID) + "]");
}
return indexVersion;
}

View File

@@ -35,29 +35,29 @@ public interface ActionFuture<T> extends Future<T> {
/**
* Similar to {@link #get()}, just catching the {@link InterruptedException} and throwing
* an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches
* an {@link IllegalStateException} instead. Also catches
* {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
* <p/>
* <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
* from {@link org.elasticsearch.transport.RemoteTransportException}). The root failure is
* still accessible using {@link #getRootFailure()}.
*/
T actionGet() throws ElasticsearchException;
T actionGet();
/**
* Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing
* an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches
* an {@link IllegalStateException} instead. Also catches
* {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
* <p/>
* <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
* from {@link org.elasticsearch.transport.RemoteTransportException}). The root failure is
* still accessible using {@link #getRootFailure()}.
*/
T actionGet(String timeout) throws ElasticsearchException;
T actionGet(String timeout);
/**
* Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing
* an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches
* an {@link IllegalStateException} instead. Also catches
* {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
* <p/>
* <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
@@ -66,29 +66,29 @@ public interface ActionFuture<T> extends Future<T> {
*
* @param timeoutMillis Timeout in millis
*/
T actionGet(long timeoutMillis) throws ElasticsearchException;
T actionGet(long timeoutMillis);
/**
* Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing
* an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches
* an {@link IllegalStateException} instead. Also catches
* {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
* <p/>
* <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
* from {@link org.elasticsearch.transport.RemoteTransportException}). The root failure is
* still accessible using {@link #getRootFailure()}.
*/
T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException;
T actionGet(long timeout, TimeUnit unit);
/**
* Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing
* an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches
* an {@link IllegalStateException} instead. Also catches
* {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
* <p/>
* <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
* from {@link org.elasticsearch.transport.RemoteTransportException}). The root failure is
* still accessible using {@link #getRootFailure()}.
*/
T actionGet(TimeValue timeout) throws ElasticsearchException;
T actionGet(TimeValue timeout);
/**
* The root (possibly) wrapped failure.

View File

@@ -124,10 +124,6 @@ import org.elasticsearch.action.count.CountAction;
import org.elasticsearch.action.count.TransportCountAction;
import org.elasticsearch.action.delete.DeleteAction;
import org.elasticsearch.action.delete.TransportDeleteAction;
import org.elasticsearch.action.deletebyquery.DeleteByQueryAction;
import org.elasticsearch.action.deletebyquery.TransportDeleteByQueryAction;
import org.elasticsearch.action.deletebyquery.TransportIndexDeleteByQueryAction;
import org.elasticsearch.action.deletebyquery.TransportShardDeleteByQueryAction;
import org.elasticsearch.action.exists.ExistsAction;
import org.elasticsearch.action.exists.TransportExistsAction;
import org.elasticsearch.action.explain.ExplainAction;
@@ -284,8 +280,6 @@ public class ActionModule extends AbstractModule {
TransportShardMultiGetAction.class);
registerAction(BulkAction.INSTANCE, TransportBulkAction.class,
TransportShardBulkAction.class);
registerAction(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class,
TransportIndexDeleteByQueryAction.class, TransportShardDeleteByQueryAction.class);
registerAction(SearchAction.INSTANCE, TransportSearchAction.class,
TransportSearchDfsQueryThenFetchAction.class,
TransportSearchQueryThenFetchAction.class,

View File

@@ -69,21 +69,21 @@ public abstract class ActionRequestBuilder<Request extends ActionRequest, Respon
/**
* Short version of execute().actionGet().
*/
public Response get() throws ElasticsearchException {
public Response get() {
return execute().actionGet();
}
/**
* Short version of execute().actionGet().
*/
public Response get(TimeValue timeout) throws ElasticsearchException {
public Response get(TimeValue timeout) {
return execute().actionGet(timeout);
}
/**
* Short version of execute().actionGet().
*/
public Response get(String timeout) throws ElasticsearchException {
public Response get(String timeout) {
return execute().actionGet(timeout);
}

View File

@@ -19,7 +19,8 @@
package org.elasticsearch.action;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.ElasticsearchException;
import java.util.ArrayList;
import java.util.List;
@@ -27,12 +28,12 @@ import java.util.List;
/**
*
*/
public class ActionRequestValidationException extends ElasticsearchIllegalArgumentException {
public class ActionRequestValidationException extends IllegalArgumentException {
private final List<String> validationErrors = new ArrayList<>();
public ActionRequestValidationException() {
super(null);
super("validation failed");
}
public void addValidationError(String error) {

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.action;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
/**
*
@@ -108,7 +107,7 @@ public enum ThreadingModel {
} else if (id == 3) {
return OPERATION_LISTENER;
} else {
throw new ElasticsearchIllegalArgumentException("No threading model for [" + id + "]");
throw new IllegalArgumentException("No threading model for [" + id + "]");
}
}
}

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.action;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
/**
* Write Consistency Level controls how many replicas should be active for a write operation to occur (a write operation
@@ -53,7 +52,7 @@ public enum WriteConsistencyLevel {
} else if (value == 3) {
return ALL;
}
throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]");
throw new IllegalArgumentException("No write consistency match [" + value + "]");
}
public static WriteConsistencyLevel fromString(String value) {
@@ -66,6 +65,6 @@
} else if (value.equals("all")) {
return ALL;
}
throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]");
throw new IllegalArgumentException("No write consistency match [" + value + "]");
}
}

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.cluster.health;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
/**
*
@@ -48,7 +47,7 @@ public enum ClusterHealthStatus {
case 2:
return RED;
default:
throw new ElasticsearchIllegalArgumentException("No cluster health status for value [" + value + "]");
throw new IllegalArgumentException("No cluster health status for value [" + value + "]");
}
}
}

View File

@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.health;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.IndicesOptions;
@@ -66,7 +65,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati
}
@Override
protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) throws ElasticsearchException {
protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
if (request.waitForEvents() != null) {
final long endTime = System.currentTimeMillis() + request.timeout().millis();
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ProcessedClusterStateUpdateTask() {
@@ -141,7 +140,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati
@Override
public void onClusterServiceClose() {
listener.onFailure(new ElasticsearchIllegalStateException("ClusterService was closed during health call"));
listener.onFailure(new IllegalStateException("ClusterService was closed during health call"));
}
@Override

View File

@@ -73,7 +73,7 @@ public class TransportNodesHotThreadsAction extends TransportNodesOperationActio
}
@Override
protected NodeHotThreads nodeOperation(NodeRequest request) throws ElasticsearchException {
protected NodeHotThreads nodeOperation(NodeRequest request) {
HotThreads hotThreads = new HotThreads()
.busiestThreads(request.request.threads)
.type(request.request.type)

View File

@@ -77,7 +77,7 @@ public class TransportNodesInfoAction extends TransportNodesOperationAction<Node
}
@Override
protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) throws ElasticsearchException {
protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) {
NodesInfoRequest request = nodeRequest.request;
return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(),
request.network(), request.transport(), request.http(), request.plugins());

View File

@@ -77,7 +77,7 @@ public class TransportNodesStatsAction extends TransportNodesOperationAction<Nod
}
@Override
protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) throws ElasticsearchException {
protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) {
NodesStatsRequest request = nodeStatsRequest.request;
return nodeService.stats(request.indices(), request.os(), request.process(), request.jvm(), request.threadPool(), request.network(),
request.fs(), request.transport(), request.http(), request.breaker());

View File

@@ -64,7 +64,7 @@ public class TransportDeleteRepositoryAction extends TransportMasterNodeOperatio
}
@Override
protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state, final ActionListener<DeleteRepositoryResponse> listener) throws ElasticsearchException {
protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state, final ActionListener<DeleteRepositoryResponse> listener) {
repositoriesService.unregisterRepository(
new RepositoriesService.UnregisterRepositoryRequest("delete_repository [" + request.name() + "]", request.name())
.masterNodeTimeout(request.masterNodeTimeout()).ackTimeout(request.timeout()),

View File

@@ -64,7 +64,7 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadOpera
}
@Override
protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener<GetRepositoriesResponse> listener) throws ElasticsearchException {
protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener<GetRepositoriesResponse> listener) {
MetaData metaData = state.metaData();
RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
if (request.repositories().length == 0 || (request.repositories().length == 1 && "_all".equals(request.repositories()[0]))) {

View File

@@ -20,8 +20,7 @@
package org.elasticsearch.action.admin.cluster.repositories.put;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.bytes.BytesReference;
@@ -218,7 +217,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
type(entry.getValue().toString());
} else if (name.equals("settings")) {
if (!(entry.getValue() instanceof Map)) {
throw new ElasticsearchIllegalArgumentException("Malformed settings section, should include an inner object");
throw new IllegalArgumentException("Malformed settings section, should include an inner object");
}
settings((Map<String, Object>) entry.getValue());
}
@@ -236,7 +235,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
try {
return source(XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + repositoryDefinition + "]", e);
throw new IllegalArgumentException("failed to parse repository source [" + repositoryDefinition + "]", e);
}
}
@@ -260,7 +259,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
try {
return source(XContentFactory.xContent(repositoryDefinition, offset, length).createParser(repositoryDefinition, offset, length).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
throw new IllegalArgumentException("failed to parse repository source", e);
}
}
@@ -274,7 +273,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
try {
return source(XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
throw new IllegalArgumentException("failed to parse template source", e);
}
}

View File

@@ -64,7 +64,7 @@ public class TransportPutRepositoryAction extends TransportMasterNodeOperationAc
}
@Override
protected void masterOperation(final PutRepositoryRequest request, ClusterState state, final ActionListener<PutRepositoryResponse> listener) throws ElasticsearchException {
protected void masterOperation(final PutRepositoryRequest request, ClusterState state, final ActionListener<PutRepositoryResponse> listener) {
repositoriesService.registerRepository(
new RepositoriesService.RegisterRepositoryRequest("put_repository [" + request.name() + "]",

View File

@@ -68,7 +68,7 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeOperatio
}
@Override
protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, final ActionListener<VerifyRepositoryResponse> listener) throws ElasticsearchException {
protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, final ActionListener<VerifyRepositoryResponse> listener) {
repositoriesService.verifyRepository(request.name(), new ActionListener<RepositoriesService.VerifyResponse>() {
@Override
public void onResponse(RepositoriesService.VerifyResponse verifyResponse) {

View File

@@ -67,7 +67,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeOperationA
}
@Override
protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) throws ElasticsearchException {
protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) {
clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterRerouteResponse>(request, listener) {
private volatile ClusterState clusterStateToSend;

View File

@@ -86,7 +86,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeOpe
}
@Override
protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener<ClusterUpdateSettingsResponse> listener) throws ElasticsearchException {
protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener<ClusterUpdateSettingsResponse> listener) {
final ImmutableSettings.Builder transientUpdates = ImmutableSettings.settingsBuilder();
final ImmutableSettings.Builder persistentUpdates = ImmutableSettings.settingsBuilder();

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.cluster.shards;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
@@ -61,11 +60,11 @@ public class ClusterSearchShardsRequest extends MasterNodeReadOperationRequest<C
@Override
public ClusterSearchShardsRequest indices(String... indices) {
if (indices == null) {
throw new ElasticsearchIllegalArgumentException("indices must not be null");
throw new IllegalArgumentException("indices must not be null");
} else {
for (int i = 0; i < indices.length; i++) {
if (indices[i] == null) {
throw new ElasticsearchIllegalArgumentException("indices[" + i + "] must not be null");
throw new IllegalArgumentException("indices[" + i + "] must not be null");
}
}
}

View File

@@ -67,7 +67,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadO
}
@Override
protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) throws ElasticsearchException {
protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices());
Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());

View File

@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.snapshots.create;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
@@ -379,13 +378,13 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
} else if (entry.getValue() instanceof ArrayList) {
indices((ArrayList<String>) entry.getValue());
} else {
throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings");
throw new IllegalArgumentException("malformed indices section, should be an array of strings");
}
} else if (name.equals("partial")) {
partial(nodeBooleanValue(entry.getValue()));
} else if (name.equals("settings")) {
if (!(entry.getValue() instanceof Map)) {
throw new ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object");
throw new IllegalArgumentException("malformed settings section, should indices an inner object");
}
settings((Map<String, Object>) entry.getValue());
} else if (name.equals("include_global_state")) {
@@ -407,7 +406,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
} catch (Exception e) {
throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + source + "]", e);
throw new IllegalArgumentException("failed to parse repository source [" + source + "]", e);
}
}
return this;
@@ -436,7 +435,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
try {
return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
throw new IllegalArgumentException("failed to parse repository source", e);
}
}
return this;
@@ -452,7 +451,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse snapshot source", e);
throw new IllegalArgumentException("failed to parse snapshot source", e);
}
}

View File

@@ -64,7 +64,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeOperationA
}
@Override
protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener<CreateSnapshotResponse> listener) throws ElasticsearchException {
protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener<CreateSnapshotResponse> listener) {
SnapshotsService.SnapshotRequest snapshotRequest =
new SnapshotsService.SnapshotRequest("create_snapshot[" + request.snapshot() + "]", request.snapshot(), request.repository())
.indices(request.indices())

View File

@@ -63,7 +63,7 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeOperationA
}
@Override
protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener<DeleteSnapshotResponse> listener) throws ElasticsearchException {
protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener<DeleteSnapshotResponse> listener) {
SnapshotId snapshotIds = new SnapshotId(request.repository(), request.snapshot());
snapshotsService.deleteSnapshot(snapshotIds, new SnapshotsService.DeleteSnapshotListener() {
@Override

View File

@@ -66,7 +66,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeOperationAct
}
@Override
protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener<GetSnapshotsResponse> listener) throws ElasticsearchException {
protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener<GetSnapshotsResponse> listener) {
try {
ImmutableList.Builder<SnapshotInfo> snapshotInfoBuilder = ImmutableList.builder();
if (isAllSnapshots(request.snapshots())) {

View File

@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.snapshots.restore;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.IndicesOptions;
@@ -493,7 +492,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
try {
return source(source.bytes());
} catch (Exception e) {
throw new ElasticsearchIllegalArgumentException("Failed to build json for repository request", e);
throw new IllegalArgumentException("Failed to build json for repository request", e);
}
}
@@ -512,13 +511,13 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
} else if (entry.getValue() instanceof ArrayList) {
indices((ArrayList<String>) entry.getValue());
} else {
throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings");
throw new IllegalArgumentException("malformed indices section, should be an array of strings");
}
} else if (name.equals("partial")) {
partial(nodeBooleanValue(entry.getValue()));
} else if (name.equals("settings")) {
if (!(entry.getValue() instanceof Map)) {
throw new ElasticsearchIllegalArgumentException("malformed settings section");
throw new IllegalArgumentException("malformed settings section");
}
settings((Map<String, Object>) entry.getValue());
} else if (name.equals("include_global_state")) {
@@ -529,17 +528,17 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
if (entry.getValue() instanceof String) {
renamePattern((String) entry.getValue());
} else {
throw new ElasticsearchIllegalArgumentException("malformed rename_pattern");
throw new IllegalArgumentException("malformed rename_pattern");
}
} else if (name.equals("rename_replacement")) {
if (entry.getValue() instanceof String) {
renameReplacement((String) entry.getValue());
} else {
throw new ElasticsearchIllegalArgumentException("malformed rename_replacement");
throw new IllegalArgumentException("malformed rename_replacement");
}
} else if (name.equals("index_settings")) {
if (!(entry.getValue() instanceof Map)) {
throw new ElasticsearchIllegalArgumentException("malformed index_settings section");
throw new IllegalArgumentException("malformed index_settings section");
}
indexSettings((Map<String, Object>) entry.getValue());
} else if (name.equals("ignore_index_settings")) {
@@ -548,10 +547,10 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
} else if (entry.getValue() instanceof List) {
ignoreIndexSettings((List<String>) entry.getValue());
} else {
throw new ElasticsearchIllegalArgumentException("malformed ignore_index_settings section, should be an array of strings");
throw new IllegalArgumentException("malformed ignore_index_settings section, should be an array of strings");
}
} else {
throw new ElasticsearchIllegalArgumentException("Unknown parameter " + name);
throw new IllegalArgumentException("Unknown parameter " + name);
}
}
indicesOptions(IndicesOptions.fromMap((Map<String, Object>) source, IndicesOptions.lenientExpandOpen()));
@@ -571,7 +570,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
} catch (Exception e) {
throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + source + "]", e);
throw new IllegalArgumentException("failed to parse repository source [" + source + "]", e);
}
}
return this;
@@ -604,7 +603,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
try {
return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
throw new IllegalArgumentException("failed to parse repository source", e);
}
}
return this;
@@ -622,7 +621,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
throw new IllegalArgumentException("failed to parse template source", e);
}
}

View File

@@ -70,7 +70,7 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeOperation
}
@Override
protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener<RestoreSnapshotResponse> listener) throws ElasticsearchException {
protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener<RestoreSnapshotResponse> listener) {
RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest(
"restore_snapshot[" + request.snapshot() + "]", request.repository(), request.snapshot(),
request.indices(), request.indicesOptions(), request.renamePattern(), request.renameReplacement(),

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.cluster.snapshots.status;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
/**
*/
@@ -92,7 +91,7 @@ public enum SnapshotIndexShardStage {
case 4:
return FAILURE;
default:
throw new ElasticsearchIllegalArgumentException("No snapshot shard stage for value [" + value + "]");
throw new IllegalArgumentException("No snapshot shard stage for value [" + value + "]");
}
}
}

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.cluster.snapshots.status;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -75,7 +74,7 @@ public class SnapshotIndexShardStatus extends BroadcastShardOperationResponse im
stage = SnapshotIndexShardStage.FAILURE;
break;
default:
throw new ElasticsearchIllegalArgumentException("Unknown stage type " + indexShardStatus.stage());
throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.stage());
}
stats = new SnapshotStats(indexShardStatus);
failure = indexShardStatus.failure();

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.cluster.snapshots.status;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
@@ -59,7 +58,7 @@ public class SnapshotShardsStats implements ToXContent {
failedShards++;
break;
default:
throw new ElasticsearchIllegalArgumentException("Unknown stage type " + shard.getStage());
throw new IllegalArgumentException("Unknown stage type " + shard.getStage());
}
}
}

View File

@@ -94,7 +94,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesOperationAction
}
@Override
protected NodeSnapshotStatus nodeOperation(NodeRequest request) throws ElasticsearchException {
protected NodeSnapshotStatus nodeOperation(NodeRequest request) {
ImmutableMap.Builder<SnapshotId, ImmutableMap<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = ImmutableMap.builder();
try {
String nodeId = clusterService.localNode().id();

View File

@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
@ -180,7 +179,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeOperation
stage = SnapshotIndexShardStage.DONE;
break;
default:
throw new ElasticsearchIllegalArgumentException("Unknown snapshot state " + shardEntry.getValue().state());
throw new IllegalArgumentException("Unknown snapshot state " + shardEntry.getValue().state());
}
SnapshotIndexShardStatus shardStatus = new SnapshotIndexShardStatus(shardEntry.getKey(), stage);
shardStatusBuilder.add(shardStatus);
@ -216,7 +215,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeOperation
state = SnapshotMetaData.State.SUCCESS;
break;
default:
throw new ElasticsearchIllegalArgumentException("Unknown snapshot state " + snapshot.state());
throw new IllegalArgumentException("Unknown snapshot state " + snapshot.state());
}
builder.add(new SnapshotStatus(snapshotId, state, shardStatusBuilder.build()));
}

View File

@ -79,7 +79,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio
}
@Override
protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener<ClusterStateResponse> listener) throws ElasticsearchException {
protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener<ClusterStateResponse> listener) {
ClusterState currentState = clusterService.state();
logger.trace("Serving cluster state request using version {}", currentState.version());
ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName());

View File

@ -97,7 +97,7 @@ public class TransportClusterStatsAction extends TransportNodesOperationAction<C
}
@Override
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) throws ElasticsearchException {
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, false, true, false, true);
NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, false, true, false, false, false);
List<ShardStats> shardsStats = new ArrayList<>();

View File

@ -62,7 +62,7 @@ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadO
}
@Override
protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener<PendingClusterTasksResponse> listener) throws ElasticsearchException {
protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener<PendingClusterTasksResponse> listener) {
listener.onResponse(new PendingClusterTasksResponse(clusterService.pendingTasks()));
}
}

View File

@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.indices.alias;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -187,7 +186,7 @@ public class Alias implements Streamable {
String currentFieldName = null;
XContentParser.Token token = parser.nextToken();
if (token == null) {
throw new ElasticsearchIllegalArgumentException("No alias is specified");
throw new IllegalArgumentException("No alias is specified");
}
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {

View File

@ -77,7 +77,7 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeOperationA
}
@Override
protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener<IndicesAliasesResponse> listener) throws ElasticsearchException {
protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener<IndicesAliasesResponse> listener) {
// Expand the index names
List<AliasActions> actions = request.aliasActions();

View File

@ -58,7 +58,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadOperatio
}
@Override
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<AliasesExistResponse> listener) throws ElasticsearchException {
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<AliasesExistResponse> listener) {
String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
boolean result = state.metaData().hasAliases(request.aliases(), concreteIndices);
listener.onResponse(new AliasesExistResponse(result));

View File

@ -61,7 +61,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadOperationA
}
@Override
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) throws ElasticsearchException {
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) {
String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
@SuppressWarnings("unchecked") // ImmutableList to List results incompatible type
ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices);

View File

@ -26,9 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.custom.TransportSingleCustomOperationAction;
import org.elasticsearch.cluster.ClusterService;
@ -98,7 +96,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
}
@Override
protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) throws ElasticsearchException {
protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) {
IndexService indexService = null;
if (shardId != null) {
indexService = indicesService.indexServiceSafe(shardId.getIndex());
@ -108,12 +106,12 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
String field = null;
if (request.field() != null) {
if (indexService == null) {
throw new ElasticsearchIllegalArgumentException("No index provided, and trying to analyzer based on a specific field which requires the index parameter");
throw new IllegalArgumentException("No index provided, and trying to analyzer based on a specific field which requires the index parameter");
}
FieldMapper<?> fieldMapper = indexService.mapperService().smartNameFieldMapper(request.field());
if (fieldMapper != null) {
if (fieldMapper.isNumeric()) {
throw new ElasticsearchIllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
}
analyzer = fieldMapper.indexAnalyzer();
field = fieldMapper.names().indexName();
@ -134,20 +132,20 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
analyzer = indexService.analysisService().analyzer(request.analyzer());
}
if (analyzer == null) {
throw new ElasticsearchIllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
}
} else if (request.tokenizer() != null) {
TokenizerFactory tokenizerFactory;
if (indexService == null) {
TokenizerFactoryFactory tokenizerFactoryFactory = indicesAnalysisService.tokenizerFactoryFactory(request.tokenizer());
if (tokenizerFactoryFactory == null) {
throw new ElasticsearchIllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]");
throw new IllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]");
}
tokenizerFactory = tokenizerFactoryFactory.create(request.tokenizer(), DEFAULT_SETTINGS);
} else {
tokenizerFactory = indexService.analysisService().tokenizer(request.tokenizer());
if (tokenizerFactory == null) {
throw new ElasticsearchIllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]");
throw new IllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]");
}
}
@ -159,17 +157,17 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
if (indexService == null) {
TokenFilterFactoryFactory tokenFilterFactoryFactory = indicesAnalysisService.tokenFilterFactoryFactory(tokenFilterName);
if (tokenFilterFactoryFactory == null) {
throw new ElasticsearchIllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]");
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.create(tokenFilterName, DEFAULT_SETTINGS);
} else {
tokenFilterFactories[i] = indexService.analysisService().tokenFilter(tokenFilterName);
if (tokenFilterFactories[i] == null) {
throw new ElasticsearchIllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
}
}
if (tokenFilterFactories[i] == null) {
throw new ElasticsearchIllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
}
}
}
@ -182,17 +180,17 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
if (indexService == null) {
CharFilterFactoryFactory charFilterFactoryFactory = indicesAnalysisService.charFilterFactoryFactory(charFilterName);
if (charFilterFactoryFactory == null) {
throw new ElasticsearchIllegalArgumentException("failed to find global char filter under [" + charFilterName + "]");
throw new IllegalArgumentException("failed to find global char filter under [" + charFilterName + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.create(charFilterName, DEFAULT_SETTINGS);
} else {
charFilterFactories[i] = indexService.analysisService().charFilter(charFilterName);
if (charFilterFactories[i] == null) {
throw new ElasticsearchIllegalArgumentException("failed to find token char under [" + charFilterName + "]");
throw new IllegalArgumentException("failed to find token char under [" + charFilterName + "]");
}
}
if (charFilterFactories[i] == null) {
throw new ElasticsearchIllegalArgumentException("failed to find token char under [" + charFilterName + "]");
throw new IllegalArgumentException("failed to find token char under [" + charFilterName + "]");
}
}
}
@ -207,7 +205,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
}
}
if (analyzer == null) {
throw new ElasticsearchIllegalArgumentException("failed to find analyzer");
throw new IllegalArgumentException("failed to find analyzer");
}
List<AnalyzeResponse.AnalyzeToken> tokens = Lists.newArrayList();
@ -220,7 +218,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
TypeAttribute type = stream.addAttribute(TypeAttribute.class);
int position = 0;
int position = -1;
while (stream.incrementToken()) {
int increment = posIncr.getPositionIncrement();
if (increment > 0) {
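Apart from the exception rename, this hunk carries one behavioral fix: the token position counter now starts at -1 instead of 0. Lucene reports positions as increments relative to the previous token, and the first token normally carries an increment of 1, so a counter starting at 0 placed every token one position too far. A standalone sketch of the corrected loop, assuming the Lucene 5-era analysis API this code base uses:

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

public final class PositionSketch {
    public static void main(String[] args) throws IOException {
        try (StandardAnalyzer analyzer = new StandardAnalyzer();
             TokenStream stream = analyzer.tokenStream("field", new StringReader("quick brown fox"))) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
            stream.reset();
            int position = -1; // start below zero: the first increment (normally 1) yields position 0
            while (stream.incrementToken()) {
                int increment = posIncr.getPositionIncrement();
                if (increment > 0) { // mirrors the guard in the patched code
                    position = position + increment;
                }
                System.out.println(term + " @ position " + position);
            }
            stream.end();
        }
    }
}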

View File

@ -97,12 +97,10 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio
}
@Override
protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request) throws ElasticsearchException {
protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request) {
IndexService service = indicesService.indexService(request.shardId().getIndex());
if (service != null) {
IndexShard shard = service.shard(request.shardId().id());
// we always clear the query cache
service.cache().queryParserCache().clear();
boolean clearedAtLeastOne = false;
if (request.filterCache()) {
clearedAtLeastOne = true;
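This hunk also drops the unconditional query parser cache clear (the two removed lines above), so afterwards only the caches named on the request are touched. A sketch of the resulting request-driven flow, with placeholder types standing in for ShardClearIndicesCacheRequest and the index-level caches:

// Sketch of request-driven cache clearing after this change: nothing is
// cleared unless the request asks for it. ClearRequest and Cache are
// placeholders, not the real Elasticsearch types.
final class CacheClearSketch {
    static final class ClearRequest {
        boolean filterCache;
        boolean fieldDataCache;
    }

    interface Cache { void clear(); }

    boolean clear(ClearRequest request, Cache filterCache, Cache fieldDataCache) {
        boolean clearedAtLeastOne = false;
        if (request.filterCache) {
            filterCache.clear();
            clearedAtLeastOne = true;
        }
        if (request.fieldDataCache) {
            fieldDataCache.clear();
            clearedAtLeastOne = true;
        }
        return clearedAtLeastOne;
    }
}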

View File

@ -75,7 +75,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeOperationActio
}
@Override
protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) throws ElasticsearchException {
protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) {
final String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())

View File

@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.create;
import com.google.common.base.Charsets;
import com.google.common.collect.Sets;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
@ -239,7 +238,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
try {
mappings.put(type, source.string());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e);
throw new IllegalArgumentException("Failed to build json for mapping request", e);
}
return this;
}

View File

@ -66,7 +66,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeOperationActi
}
@Override
protected void masterOperation(final CreateIndexRequest request, final ClusterState state, final ActionListener<CreateIndexResponse> listener) throws ElasticsearchException {
protected void masterOperation(final CreateIndexRequest request, final ClusterState state, final ActionListener<CreateIndexResponse> listener) {
String cause = request.cause();
if (cause.length() == 0) {
cause = "api";

View File

@ -75,7 +75,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeOperationActi
}
@Override
protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) throws ElasticsearchException {
protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) {
String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
if (concreteIndices.length == 0) {
listener.onResponse(new DeleteIndexResponse(true));

View File

@ -64,7 +64,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadOperati
}
@Override
protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, final ActionListener<IndicesExistsResponse> listener) throws ElasticsearchException {
protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, final ActionListener<IndicesExistsResponse> listener) {
boolean exists;
try {
// Similar to the previous behaviour, but now aliases and wildcards are also supported.

View File

@ -61,7 +61,7 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadOperation
}
@Override
protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener<TypesExistsResponse> listener) throws ElasticsearchException {
protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener<TypesExistsResponse> listener) {
String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
if (concreteIndices.length == 0) {
listener.onResponse(new TypesExistsResponse(false));

View File

@ -90,7 +90,7 @@ public class TransportFlushAction extends TransportBroadcastOperationAction<Flus
}
@Override
protected ShardFlushResponse shardOperation(ShardFlushRequest request) throws ElasticsearchException {
protected ShardFlushResponse shardOperation(ShardFlushRequest request) {
IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
indexShard.flush(request.getRequest());
return new ShardFlushResponse(request.shardId());

View File

@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.indices.get;
import com.google.common.collect.ObjectArrays;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
import org.elasticsearch.common.io.stream.StreamInput;
@ -74,18 +73,18 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
return this.validNames.contains(name);
}
public static Feature fromName(String name) throws ElasticsearchIllegalArgumentException {
public static Feature fromName(String name) {
for (Feature feature : Feature.values()) {
if (feature.validName(name)) {
return feature;
}
}
throw new ElasticsearchIllegalArgumentException("No feature for name [" + name + "]");
throw new IllegalArgumentException("No feature for name [" + name + "]");
}
public static Feature fromId(byte id) throws ElasticsearchIllegalArgumentException {
public static Feature fromId(byte id) {
if (id < 0 || id >= FEATURES.length) {
throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]");
throw new IllegalArgumentException("No mapping for id [" + id + "]");
}
return FEATURES[id];
}
@ -104,7 +103,7 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
public GetIndexRequest features(Feature... features) {
if (features == null) {
throw new ElasticsearchIllegalArgumentException("features cannot be null");
throw new IllegalArgumentException("features cannot be null");
} else {
this.features = features;
}
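Feature keeps its two lookup entry points, both now throwing the JDK exception. fromId is an array-indexed lookup whose valid range is derived from the cached values() array, which is why the bounds check alone suffices. A hedged, self-contained sketch (constant names follow the diff; bodies are simplified):

// Sketch of the id-indexed enum lookup used by GetIndexRequest.Feature.
// FEATURES caches values() once so fromId stays allocation-free; ids outside
// [0, FEATURES.length) fail fast.
enum FeatureSketch {
    ALIASES, MAPPINGS, SETTINGS, WARMERS;

    private static final FeatureSketch[] FEATURES = values();

    static FeatureSketch fromId(byte id) {
        if (id < 0 || id >= FEATURES.length) {
            throw new IllegalArgumentException("No mapping for id [" + id + "]");
        }
        return FEATURES[id];
    }
}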

View File

@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.get;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
import org.elasticsearch.action.support.ActionFilters;
@ -70,7 +69,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
@Override
protected void doMasterOperation(final GetIndexRequest request, String[] concreteIndices, final ClusterState state,
final ActionListener<GetIndexResponse> listener) throws ElasticsearchException {
final ActionListener<GetIndexResponse> listener) {
ImmutableOpenMap<String, ImmutableList<Entry>> warmersResult = ImmutableOpenMap.of();
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsResult = ImmutableOpenMap.of();
ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliasesResult = ImmutableOpenMap.of();
@ -112,7 +111,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
break;
default:
throw new ElasticsearchIllegalStateException("feature [" + feature + "] is not valid");
throw new IllegalStateException("feature [" + feature + "] is not valid");
}
}
listener.onResponse(new GetIndexResponse(concreteIndices, warmersResult, mappingsResult, aliasesResult, settings));

View File

@ -87,7 +87,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomO
}
@Override
protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) throws ElasticsearchException {
protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) {
assert shardId != null;
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
Collection<String> typeIntersection;
@ -173,7 +173,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomO
}
};
private ImmutableMap<String, FieldMappingMetaData> findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) throws ElasticsearchException {
private ImmutableMap<String, FieldMappingMetaData> findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) {
MapBuilder<String, FieldMappingMetaData> fieldMappings = new MapBuilder<>();
final DocumentFieldMappers allFieldMappers = documentMapper.mappers();
for (String field : request.fields()) {

View File

@ -60,7 +60,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa
}
@Override
protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetMappingsResponse> listener) throws ElasticsearchException {
protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetMappingsResponse> listener) {
logger.trace("serving getMapping request based on version {}", state.version());
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result = state.metaData().findMappings(
concreteIndices, request.types()

View File

@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.indices.mapping.put;
import com.carrotsearch.hppc.ObjectOpenHashSet;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
@ -170,7 +169,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
for (String s : s1) {
String[] s2 = Strings.split(s, "=");
if (s2.length != 2) {
throw new ElasticsearchIllegalArgumentException("malformed " + s);
throw new IllegalArgumentException("malformed " + s);
}
builder.field(s2[0], s2[1]);
}
@ -190,7 +189,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
for (String s : s1) {
String[] s2 = Strings.split(s, "=");
if (s2.length != 2) {
throw new ElasticsearchIllegalArgumentException("malformed " + s);
throw new IllegalArgumentException("malformed " + s);
}
builder.field(s2[0], s2[1]);
}
@ -203,7 +202,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
builder.endObject();
return builder;
} catch (Exception e) {
throw new ElasticsearchIllegalArgumentException("failed to generate simplified mapping definition", e);
throw new IllegalArgumentException("failed to generate simplified mapping definition", e);
}
}
@ -214,7 +213,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
try {
return source(mappingBuilder.string());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e);
throw new IllegalArgumentException("Failed to build json for mapping request", e);
}
}
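The simplified-mapping helper accepts strings that carry comma-separated name=value attribute pairs, and a pair that does not split into exactly two parts is now rejected with IllegalArgumentException. A small sketch of that split-and-validate step, using plain String.split in place of the Strings utility seen in the diff:

import java.util.LinkedHashMap;
import java.util.Map;

public final class MappingPairsSketch {
    // Parses "a=1,b=2"-style attribute strings the way the simplified mapping
    // definition does; anything without exactly one '=' is malformed.
    static Map<String, String> parsePairs(String spec) {
        Map<String, String> attrs = new LinkedHashMap<>();
        for (String s : spec.split(",")) {
            String[] kv = s.split("=");
            if (kv.length != 2) {
                throw new IllegalArgumentException("malformed " + s);
            }
            attrs.put(kv[0], kv[1]);
        }
        return attrs;
    }

    public static void main(String[] args) {
        System.out.println(parsePairs("type=string,index=analyzed"));
    }
}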

View File

@ -65,7 +65,7 @@ public class TransportPutMappingAction extends TransportMasterNodeOperationActio
}
@Override
protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener<PutMappingResponse> listener) throws ElasticsearchException {
protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener<PutMappingResponse> listener) {
final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices());
PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())

View File

@ -75,7 +75,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeOperationAction
}
@Override
protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener<OpenIndexResponse> listener) throws ElasticsearchException {
protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener<OpenIndexResponse> listener) {
final String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())

View File

@ -91,7 +91,7 @@ public class TransportOptimizeAction extends TransportBroadcastOperationAction<O
}
@Override
protected ShardOptimizeResponse shardOperation(ShardOptimizeRequest request) throws ElasticsearchException {
protected ShardOptimizeResponse shardOperation(ShardOptimizeRequest request) {
IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
indexShard.optimize(request.optimizeRequest());
return new ShardOptimizeResponse(request.shardId());

View File

@ -124,7 +124,7 @@ public class TransportRecoveryAction extends TransportBroadcastOperationAction<R
}
@Override
protected ShardRecoveryResponse shardOperation(ShardRecoveryRequest request) throws ElasticsearchException {
protected ShardRecoveryResponse shardOperation(ShardRecoveryRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.shardSafe(request.shardId().id());

View File

@ -91,7 +91,7 @@ public class TransportRefreshAction extends TransportBroadcastOperationAction<Re
}
@Override
protected ShardRefreshResponse shardOperation(ShardRefreshRequest request) throws ElasticsearchException {
protected ShardRefreshResponse shardOperation(ShardRefreshRequest request) {
IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
indexShard.refresh("api");
logger.trace("{} refresh request executed", indexShard.shardId());

View File

@ -117,7 +117,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastOperationA
}
@Override
protected ShardSegments shardOperation(IndexShardSegmentRequest request) throws ElasticsearchException {
protected ShardSegments shardOperation(IndexShardSegmentRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.shardSafe(request.shardId().id());
return new ShardSegments(indexShard.routingEntry(), indexShard.engine().segments(request.verbose));

View File

@ -72,7 +72,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadOperation
}
@Override
protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener<GetSettingsResponse> listener) throws ElasticsearchException {
protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener<GetSettingsResponse> listener) {
String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices());
ImmutableOpenMap.Builder<String, Settings> indexToSettingsBuilder = ImmutableOpenMap.builder();
for (String concreteIndex : concreteIndices) {

View File

@ -74,7 +74,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeOperationA
}
@Override
protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<UpdateSettingsResponse> listener) throws ElasticsearchException {
protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<UpdateSettingsResponse> listener) {
final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices());
UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest()
.indices(concreteIndices)

View File

@ -119,7 +119,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastOperationActi
}
@Override
protected ShardStats shardOperation(IndexShardStatsRequest request) throws ElasticsearchException {
protected ShardStats shardOperation(IndexShardStatsRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.shardSafe(request.shardId().id());
// if we don't have the routing entry yet, which we need stats-wise, we treat the shard as if it is not ready yet

View File

@ -63,7 +63,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeOpera
}
@Override
protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener<DeleteIndexTemplateResponse> listener) throws ElasticsearchException {
protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener<DeleteIndexTemplateResponse> listener) {
indexTemplateService.removeTemplates(new MetaDataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.masterNodeTimeout()), new MetaDataIndexTemplateService.RemoveListener() {
@Override
public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) {

View File

@ -63,7 +63,7 @@ public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadOpe
}
@Override
protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener<GetIndexTemplatesResponse> listener) throws ElasticsearchException {
protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener<GetIndexTemplatesResponse> listener) {
List<IndexTemplateMetaData> results;
// If we did not ask for a specific name, then we return all templates

Some files were not shown because too many files have changed in this diff.