Merge pull request #1 from elasticsearch/master

sync with es/es
This commit is contained in:
b.hwang 2014-01-15 15:55:48 -08:00
commit b358f13954
142 changed files with 5047 additions and 921 deletions

View File

@ -58,6 +58,12 @@ Once it's done it will print all the remaining steps.
""" """
env = os.environ env = os.environ
PLUGINS = [('bigdesk', 'lukas-vlcek/bigdesk'),
('paramedic', 'karmi/elasticsearch-paramedic'),
('segmentspy', 'polyfractal/elasticsearch-segmentspy'),
('inquisitor', 'polyfractal/elasticsearch-inquisitor'),
('head', 'mobz/elasticsearch-head')]
LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log') LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log')
def log(msg): def log(msg):
@ -117,10 +123,11 @@ def verify_mvn_java_version(version, mvn):
# Returns the hash of the current git HEAD revision # Returns the hash of the current git HEAD revision
def get_head_hash(): def get_head_hash():
return get_hash('HEAD') return os.popen(' git rev-parse --verify HEAD 2>&1').read().strip()
def get_hash(version): # Returns the hash of the given tag revision
return os.popen('git rev-parse --verify %s 2>&1' % (version)).read().strip() def get_tag_hash(tag):
return os.popen('git show-ref --tags %s --hash 2>&1' % (tag)).read().strip()
# Returns the name of the current branch # Returns the name of the current branch
def get_current_branch(): def get_current_branch():
@ -133,6 +140,10 @@ verify_mvn_java_version('1.6', MVN)
def release_branch(version): def release_branch(version):
return 'release_branch_%s' % version return 'release_branch_%s' % version
# runs get fetch on the given remote
def fetch(remote):
run('git fetch %s' % remote)
# Creates a new release branch from the given source branch # Creates a new release branch from the given source branch
# and rebases the source branch from the remote before creating # and rebases the source branch from the remote before creating
# the release branch. Note: This fails if the source branch # the release branch. Note: This fails if the source branch
@ -309,7 +320,7 @@ def generate_checksums(files):
res = res + [os.path.join(directory, checksum_file), release_file] res = res + [os.path.join(directory, checksum_file), release_file]
return res return res
def download_and_verify(release, files, base_url='https://download.elasticsearch.org/elasticsearch/elasticsearch'): def download_and_verify(release, files, plugins=None, base_url='https://download.elasticsearch.org/elasticsearch/elasticsearch'):
print('Downloading and verifying release %s from %s' % (release, base_url)) print('Downloading and verifying release %s from %s' % (release, base_url))
tmp_dir = tempfile.mkdtemp() tmp_dir = tempfile.mkdtemp()
try: try:
@ -326,11 +337,12 @@ def download_and_verify(release, files, base_url='https://download.elasticsearch
urllib.request.urlretrieve(url, checksum_file) urllib.request.urlretrieve(url, checksum_file)
print(' Verifying checksum %s' % (checksum_file)) print(' Verifying checksum %s' % (checksum_file))
run('cd %s && sha1sum -c %s' % (tmp_dir, os.path.basename(checksum_file))) run('cd %s && sha1sum -c %s' % (tmp_dir, os.path.basename(checksum_file)))
smoke_test_release(release, downloaded_files, get_hash('v%s' % release)) smoke_test_release(release, downloaded_files, get_tag_hash('v%s' % release), plugins)
print(' SUCCESS')
finally: finally:
shutil.rmtree(tmp_dir) shutil.rmtree(tmp_dir)
def smoke_test_release(release, files, expected_hash): def smoke_test_release(release, files, expected_hash, plugins):
for release_file in files: for release_file in files:
if not os.path.isfile(release_file): if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed missing file %s' % (release_file)) raise RuntimeError('Smoketest failed missing file %s' % (release_file))
@ -344,9 +356,20 @@ def smoke_test_release(release, files, expected_hash):
continue # nothing to do here continue # nothing to do here
es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch') es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file) print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release),'bin/plugin')
plugin_names = {}
for name, plugin in plugins:
print(' Install plugin [%s] from [%s]' % (name, plugin))
run('%s %s %s' % (es_plugin_path, '-install', plugin))
plugin_names[name] = True
if release.startswith("0.90."):
background = '' # 0.90.x starts in background automatically
else:
background = '-d'
print(' Starting elasticsearch deamon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release)) print(' Starting elasticsearch deamon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release))
run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.discovery.zen.ping.multicast.enabled=false -d' run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.discovery.zen.ping.multicast.enabled=false %s'
% (java_exe(), es_run_path)) % (java_exe(), es_run_path, background))
conn = HTTPConnection('127.0.0.1', 9200, 20); conn = HTTPConnection('127.0.0.1', 9200, 20);
wait_for_node_startup() wait_for_node_startup()
try: try:
@ -360,9 +383,25 @@ def smoke_test_release(release, files, expected_hash):
if version['build_snapshot']: if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version') raise RuntimeError('Expected non snapshot version')
if version['build_hash'].strip() != expected_hash: if version['build_hash'].strip() != expected_hash:
raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (get_head_hash(), version['build_hash'])) raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
print(' Running REST Spec tests against package [%s]' % release_file) print(' Running REST Spec tests against package [%s]' % release_file)
run_mvn('test -Dtests.rest=%s -Dtests.class=*.*RestTests' % ("127.0.0.1:9200")) run_mvn('test -Dtests.rest=%s -Dtests.class=*.*RestTests' % ("127.0.0.1:9200"))
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes?plugin=true&pretty=true')
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'], False):
raise RuntimeError('Unexpeced plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else: else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status) raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally: finally:
@ -471,14 +510,11 @@ if __name__ == '__main__':
print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run)) print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run))
print(' JAVA_HOME is [%s]' % JAVA_HOME) print(' JAVA_HOME is [%s]' % JAVA_HOME)
print(' Running with maven command: [%s] ' % (MVN)) print(' Running with maven command: [%s] ' % (MVN))
release_version = find_release_version(src_branch)
if not smoke_test_version and not dry_run:
smoke_test_version = release_version
elif smoke_test_version:
print("Skipping build - smoketest only against version %s" % smoke_test_version)
if build: if build:
release_version = find_release_version(src_branch)
if not dry_run:
smoke_test_version = release_version
head_hash = get_head_hash() head_hash = get_head_hash()
run_mvn('clean') # clean the env! run_mvn('clean') # clean the env!
print(' Release version: [%s]' % release_version) print(' Release version: [%s]' % release_version)
@ -497,11 +533,14 @@ if __name__ == '__main__':
print(''.join(['-' for _ in range(80)])) print(''.join(['-' for _ in range(80)]))
print('Building Release candidate') print('Building Release candidate')
input('Press Enter to continue...') input('Press Enter to continue...')
print(' Running maven builds now and publish to sonartype- run-tests [%s]' % run_tests) if not dry_run:
print(' Running maven builds now and publish to sonartype - run-tests [%s]' % run_tests)
else:
print(' Running maven builds now run-tests [%s]' % run_tests)
build_release(run_tests=run_tests, dry_run=dry_run, cpus=cpus) build_release(run_tests=run_tests, dry_run=dry_run, cpus=cpus)
artifacts = get_artifacts(release_version) artifacts = get_artifacts(release_version)
artifacts_and_checksum = generate_checksums(artifacts) artifacts_and_checksum = generate_checksums(artifacts)
smoke_test_release(release_version, artifacts, get_head_hash()) smoke_test_release(release_version, artifacts, get_head_hash(), PLUGINS)
print(''.join(['-' for _ in range(80)])) print(''.join(['-' for _ in range(80)]))
print('Finish Release -- dry_run: %s' % dry_run) print('Finish Release -- dry_run: %s' % dry_run)
input('Press Enter to continue...') input('Press Enter to continue...')
@ -530,5 +569,10 @@ if __name__ == '__main__':
run('git tag -d v%s' % release_version) run('git tag -d v%s' % release_version)
# we delete this one anyways # we delete this one anyways
run('git branch -D %s' % (release_branch(release_version))) run('git branch -D %s' % (release_branch(release_version)))
else:
print("Skipping build - smoketest only against version %s" % smoke_test_version)
run_mvn('clean') # clean the env!
if smoke_test_version: if smoke_test_version:
download_and_verify(smoke_test_version, artifact_names(smoke_test_version)) fetch(remote)
download_and_verify(smoke_test_version, artifact_names(smoke_test_version), plugins=PLUGINS)

View File

@ -11,7 +11,8 @@ following types are supported: `arabic`, `armenian`, `basque`,
All analyzers support setting custom `stopwords` either internally in All analyzers support setting custom `stopwords` either internally in
the config, or by using an external stopwords file by setting the config, or by using an external stopwords file by setting
`stopwords_path`. `stopwords_path`. Check <<analysis-stop-analyzer,Stop Analyzer>> for
more details.
The following analyzers support setting custom `stem_exclusion` list: The following analyzers support setting custom `stem_exclusion` list:
`arabic`, `armenian`, `basque`, `brazilian`, `bulgarian`, `catalan`, `arabic`, `armenian`, `basque`, `brazilian`, `bulgarian`, `catalan`,

View File

@ -14,8 +14,9 @@ type:
|`pattern` |The regular expression pattern, defaults to `\W+`. |`pattern` |The regular expression pattern, defaults to `\W+`.
|`flags` |The regular expression flags. |`flags` |The regular expression flags.
|`stopwords` |A list of stopwords to initialize the stop filter with. |`stopwords` |A list of stopwords to initialize the stop filter with.
Defaults to an 'empty' stopword list coming[1.0.0.RC1, Previously Defaults to an 'empty' stopword list added[1.0.0.RC1, Previously
defaulted to the English stopwords list] defaulted to the English stopwords list]. Check
<<analysis-stop-analyzer,Stop Analyzer>> for more details.
|=================================================================== |===================================================================
*IMPORTANT*: The regular expression should match the *token separators*, *IMPORTANT*: The regular expression should match the *token separators*,

View File

@ -41,8 +41,9 @@ filter>> and defaults to `English`. Note that not all the language
analyzers have a default set of stopwords provided. analyzers have a default set of stopwords provided.
The `stopwords` parameter can be used to provide stopwords for the The `stopwords` parameter can be used to provide stopwords for the
languages that has no defaults, or to simply replace the default set languages that have no defaults, or to simply replace the default set
with your custom list. A default set of stopwords for many of these with your custom list. Check <<analysis-stop-analyzer,Stop Analyzer>>
for more details. A default set of stopwords for many of these
languages is available from for instance languages is available from for instance
https://github.com/apache/lucene-solr/tree/trunk/lucene/analysis/common/src/resources/org/apache/lucene/analysis/[here] https://github.com/apache/lucene-solr/tree/trunk/lucene/analysis/common/src/resources/org/apache/lucene/analysis/[here]
and and

View File

@ -17,9 +17,10 @@ type:
[cols="<,<",options="header",] [cols="<,<",options="header",]
|======================================================================= |=======================================================================
|Setting |Description |Setting |Description
|`stopwords` |A list of stopword to initialize the stop filter with. |`stopwords` |A list of stopwords to initialize the stop filter with.
Defaults to an 'empty' stopword list added[1.0.0.Beta1, Previously Defaults to an 'empty' stopword list added[1.0.0.Beta1, Previously
defaulted to the English stopwords list] defaulted to the English stopwords list]. Check
<<analysis-stop-analyzer,Stop Analyzer>> for more details.
|`max_token_length` |The maximum token length. If a token is seen that |`max_token_length` |The maximum token length. If a token is seen that
exceeds this length then it is discarded. Defaults to `255`. exceeds this length then it is discarded. Defaults to `255`.
|======================================================================= |=======================================================================

View File

@ -12,10 +12,11 @@ The following are settings that can be set for a `stop` analyzer type:
[cols="<,<",options="header",] [cols="<,<",options="header",]
|======================================================================= |=======================================================================
|Setting |Description |Setting |Description
|`stopwords` |A list of stopword to initialize the stop filter with. |`stopwords` |A list of stopwords to initialize the stop filter with.
Defaults to the english stop words. Defaults to the english stop words.
|`stopwords_path` |A path (either relative to `config` location, or |`stopwords_path` |A path (either relative to `config` location, or
absolute) to a stopwords file configuration. absolute) to a stopwords file configuration.
|======================================================================= |=======================================================================
Use `stopwords: _none_` to explicitly specify an 'empty' stopword list.

View File

@ -26,17 +26,30 @@ support wildcards, for example: `test*`, and the ability to "add" (`+`)
and "remove" (`-`), for example: `+test*,-test3`. and "remove" (`-`), for example: `+test*,-test3`.
All multi indices API support the following url query string parameters: All multi indices API support the following url query string parameters:
* `ignore_unavailable` - Controls whether to ignore if any specified indices are unavailable, this includes indices
that don't exist or closed indices. Either `true` or `false` can be specified.
* `allow_no_indices` - Controls whether to fail if a wildcard indices expressions results into no concrete indices.
Either `true` or `false` can be specified. For example if the wildcard expression `foo*` is specified and no indices
are available that start with `foo` then depending on this setting the request will fail. This setting is also applicable
when `_all`, `*` or no index has been specified.
* `expand_wildcards` - Controls to what kind of concrete indices wildcard indices expression expand to. If `open` is
specified then the wildcard expression if expanded to only open indices and if `closed` is specified then the wildcard
expression if expanded only to closed indices. Also both values (`open,closed`) can be specified to expand to all indices.
The defaults settings for the above parameters dependent on the api being used. `ignore_unavailable`::
Controls whether to ignore if any specified indices are unavailable, this
includes indices that don't exist or closed indices. Either `true` or `false`
can be specified.
`allow_no_indices`::
Controls whether to fail if a wildcard indices expressions results into no
concrete indices. Either `true` or `false` can be specified. For example if
the wildcard expression `foo*` is specified and no indices are available that
start with `foo` then depending on this setting the request will fail. This
setting is also applicable when `_all`, `*` or no index has been specified.
`expand_wildcards`::
Controls to what kind of concrete indices wildcard indices expression expand
to. If `open` is specified then the wildcard expression if expanded to only
open indices and if `closed` is specified then the wildcard expression if
expanded only to closed indices. Also both values (`open,closed`) can be
specified to expand to all indices.
The defaults settings for the above parameters depend on the api being used.
NOTE: Single index APIs such as the <<docs>> and the NOTE: Single index APIs such as the <<docs>> and the
<<indices-aliases,single-index `alias` APIs>> do not support multiple indices. <<indices-aliases,single-index `alias` APIs>> do not support multiple indices.

View File

@ -33,7 +33,7 @@ The result of the above index operation is:
The index operation automatically creates an index if it has not been The index operation automatically creates an index if it has not been
created before (check out the created before (check out the
<<indices-create-index,create index API>> for manually <<indices-create-index,create index API>> for manually
creating an index), and also automatically creates a creating an index), and also automatically creates a
dynamic type mapping for the specific type if one has not yet been dynamic type mapping for the specific type if one has not yet been
created (check out the <<indices-put-mapping,put mapping>> created (check out the <<indices-put-mapping,put mapping>>
@ -44,12 +44,21 @@ objects will automatically be added to the mapping definition of the
type specified. Check out the <<mapping,mapping>> type specified. Check out the <<mapping,mapping>>
section for more information on mapping definitions. section for more information on mapping definitions.
Though explained on the <<mapping,mapping>> section, Note that the format of the JSON document can also include the type (very handy
it's important to note that the format of the JSON document can also when using JSON mappers) if the `index.mapping.allow_type_wrapper` setting is
include the type (very handy when using JSON mappers), for example: set to true, for example:
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
$ curl -XPOST 'http://localhost:9200/twitter' -d '{
"settings": {
"index": {
"mapping.allow_type_wrapper": true
}
}
}'
{"acknowledged":true}
$ curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '{ $ curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '{
"tweet" : { "tweet" : {
"user" : "kimchy", "user" : "kimchy",

View File

@ -24,7 +24,39 @@ field data after a certain time of inactivity. Defaults to `-1`. For
example, can be set to `5m` for a 5 minute expiry. example, can be set to `5m` for a 5 minute expiry.
|======================================================================= |=======================================================================
=== Field data formats [float]
[[fielddata-circuit-breaker]]
=== Field data circuit breaker
The field data circuit breaker allows Elasticsearch to estimate the amount of
memory a field will require to be loaded into memory. It can then prevent the
field data loading by raising an exception. By default the limit is configured
to 80% of the maximum JVM heap. It can be configured with the following
parameters:
[cols="<,<",options="header",]
|=======================================================================
|Setting |Description
|`indices.fielddata.breaker.limit` |Maximum size of estimated field data
to allow loading. Defaults to 80% of the maximum JVM heap.
|`indices.fielddata.breaker.overhead` |A constant that all field data
estimations are multiplied with to determine a final estimation. Defaults to
1.03
|=======================================================================
Both the `indices.fielddata.breaker.limit` and
`indices.fielddata.breaker.overhead` can be changed dynamically using the
cluster update settings API.
[float]
[[fielddata-monitoring]]
=== Monitoring field data
You can monitor memory usage for field data as well as the field data circuit
breaker using
<<cluster-nodes-stats,Nodes Stats API>>
[[fielddata-formats]]
== Field data formats
The field data format controls how field data should be stored. The field data format controls how field data should be stored.
@ -236,34 +268,3 @@ The `frequency` and `regex` filters can be combined:
} }
} }
-------------------------------------------------- --------------------------------------------------
[float]
[[field-data-circuit-breaker]]
=== Field data circuit breaker
The field data circuit breaker allows Elasticsearch to estimate the amount of
memory a field will required to be loaded into memory. It can then prevent the
field data loading by raising and exception. By default it is configured with
no limit (-1 bytes), but is automatically set to `indices.fielddata.cache.size`
if set. It can be configured with the following parameters:
[cols="<,<",options="header",]
|=======================================================================
|Setting |Description
|`indices.fielddata.breaker.limit` |Maximum size of estimated field data
to allow loading. Defaults to 80% of the maximum JVM heap.
|`indices.fielddata.breaker.overhead` |A constant that all field data
estimations are multiplied with to determine a final estimation. Defaults to
1.03
|=======================================================================
Both the `indices.fielddata.breaker.limit` and
`indices.fielddata.breaker.overhead` can be changed dynamically using the
cluster update settings API.
[float]
[[field-data-monitoring]]
=== Monitoring field data
You can monitor memory usage for field data as well as the field data circuit
breaker using
<<cluster-nodes-stats,Nodes Stats API>>

View File

@ -81,6 +81,7 @@ Lucene `NIOFSDirectory`) using NIO. It allows multiple threads to read
from the same file concurrently. It is not recommended on Windows from the same file concurrently. It is not recommended on Windows
because of a bug in the SUN Java implementation. because of a bug in the SUN Java implementation.
[[mmapfs]]
[float] [float]
==== MMap FS ==== MMap FS

View File

@ -3,6 +3,8 @@
include::setup.asciidoc[] include::setup.asciidoc[]
include::migration/migrate_1_0.asciidoc[]
include::api-conventions.asciidoc[] include::api-conventions.asciidoc[]
include::docs.asciidoc[] include::docs.asciidoc[]
@ -29,3 +31,5 @@ include::testing.asciidoc[]
include::glossary.asciidoc[] include::glossary.asciidoc[]

View File

@ -153,17 +153,22 @@ curl -XGET 'http://localhost:9200/alias2/_search?q=user:kimchy&routing=2,3'
[float] [float]
[[alias-adding]] [[alias-adding]]
=== Add a single index alias === Add a single alias
There is also an api to add a single index alias, with options: An alias can also be added with the endpoint
`PUT /{index}/_alias/{name}`
where
[horizontal] [horizontal]
`index`:: The index to alias refers to. This is a required option. `index`:: The index to alias refers to. Can be any of `blank | * | _all | glob pattern | name1, name2, …`
`alias`:: The name of the alias. This is a required option. `name`:: The name of the alias. This is a required option.
`routing`:: An optional routing that can be associated with an alias. `routing`:: An optional routing that can be associated with an alias.
`filter`:: An optional filter that can be associated with an alias. `filter`:: An optional filter that can be associated with an alias.
The rest endpoint is: `/{index}/_alias/{alias}`. You can also use the plural `_aliases`.
[float] [float]
==== Examples: ==== Examples:
@ -191,16 +196,18 @@ curl -XPUT 'localhost:9200/users/_alias/user_12' -d '{
[float] [float]
[[deleting]] [[deleting]]
=== Delete a single index alias === Delete aliases
Th API to delete a single index alias, has options:
The rest endpoint is: `/{index}/_alias/{name}`
where
[horizontal] [horizontal]
`index`:: The index the alias is in, the needs to be deleted. This is `index`:: `* | _all | glob pattern | name1, name2, …`
a required option. `name`:: `* | _all | glob pattern | name1, name2, …`
`alias`:: The name of the alias to delete. This is a required option.
The rest endpoint is: `/{index}/_alias/{alias}`. Example: Alternatively you can use the plural `_aliases`. Example:
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------

View File

@ -1,8 +1,25 @@
[[indices-delete-mapping]] [[indices-delete-mapping]]
== Delete Mapping == Delete Mapping
Allow to delete a mapping (type) along with its data. The REST endpoint Allow to delete a mapping (type) along with its data. The REST endpoints are
is `/{index}/{type}` with `DELETE` method.
[source,js]
--------------------------------------------------
[DELETE] /{index}/{type}
[DELETE] /{index}/{type}/_mapping
[DELETE] /{index}/_mapping/{type}
--------------------------------------------------
where
[horizontal]
`index`:: `* | _all | glob pattern | name1, name2, …`
`type`:: `* | _all | glob pattern | name1, name2, …`
Note, most times, it makes more sense to reindex the data into a fresh Note, most times, it makes more sense to reindex the data into a fresh
index compared to delete large chunks of it. index compared to delete large chunks of it.

View File

@ -39,12 +39,12 @@ curl -XGET 'http://localhost:9200/my-index/_settings?prefix=index.'
curl -XGET 'http://localhost:9200/_all/_settings?prefix=index.routing.allocation.' curl -XGET 'http://localhost:9200/_all/_settings?prefix=index.routing.allocation.'
curl -XGET 'http://localhost:9200/2013-*/_settings?prefix=index.merge.' curl -XGET 'http://localhost:9200/2013-*/_settings?name=index.merge.*'
curl -XGET 'http://localhost:9200/2013-*/index.merge./_settings' curl -XGET 'http://localhost:9200/2013-*/_settings/index.merge.*'
-------------------------------------------------- --------------------------------------------------
The first example returns all index settings that start with `index.` in the index `my-index`, The first example returns all index settings that start with `index.` in the index `my-index`,
the second example gets all index settings that start with `index.routing.allocation.` for the second example gets all index settings that start with `index.routing.allocation.` for
all indices, lastly the third example returns all index settings that start with `index.merge.` all indices, lastly the third example returns all index settings that start with `index.merge.`
in indices that start with `2013-`. in indices that start with `2013-`.

View File

@ -59,3 +59,25 @@ $ curl -XPUT 'http://localhost:9200/kimchy,elasticsearch/tweet/_mapping' -d '
} }
' '
-------------------------------------------------- --------------------------------------------------
All options:
[source,js]
--------------------------------------------------
PUT /{index}/_mapping/{type}
--------------------------------------------------
where
[horizontal]
`{index}`:: `blank | * | _all | glob pattern | name1, name2, …`
`{type}`:: Name of the type to add. Must be the name of the type defined in the body.
Instead of `_mapping` you can also use the plural `_mappings`.
The uri `PUT /{index}/{type}/_mapping` is still supported for backwards compatibility.

View File

@ -112,25 +112,55 @@ curl -XPUT localhost:9200/test/type1/_warmer/warmer_1 -d '{
}' }'
-------------------------------------------------- --------------------------------------------------
[float] All options:
[[removing]]
=== Delete Warmer
Removing a warmer can be done against an index (or alias / indices)
based on its name. The provided name can be a simple wildcard expression
or omitted to remove all warmers. Some samples:
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
# delete warmer named warmer_1 on test index
curl -XDELETE localhost:9200/test/_warmer/warmer_1
# delete all warmers that start with warm on test index PUT _warmer/{warmer_name}
curl -XDELETE localhost:9200/test/_warmer/warm*
PUT /{index}/_warmer/{warmer_name}
PUT /{index}/{type}/_warmer/{warmer_name}
# delete all warmers for test index
curl -XDELETE localhost:9200/test/_warmer/
-------------------------------------------------- --------------------------------------------------
where
[horizontal]
`{index}`:: `* | _all | glob pattern | name1, name2, …`
`{type}`:: `* | _all | glob pattern | name1, name2, …`
Instead of `_warmer` you can also use the plural `_warmers`.
[float]
[[removing]]
=== Delete Warmers
Warmers can be deleted using the following endpoint:
[source,js]
--------------------------------------------------
[DELETE] /{index}/_warmer/{name}
--------------------------------------------------
where
[horizontal]
`{index}`:: `* | _all | glob pattern | name1, name2, …`
`{name}`:: `* | _all | glob pattern | name1, name2, …`
Instead of `_warmer` you can also use the plural `_warmers`.
[float] [float]
[[warmer-retrieving]] [[warmer-retrieving]]

View File

@ -0,0 +1,349 @@
[[breaking-changes]]
= Breaking changes in 1.0
[partintro]
--
This section discusses the changes that you need to be aware of when migrating
your application to Elasticsearch 1.0.
--
== System and settings
* Elasticsearch now runs in the foreground by default. There is no more `-f`
flag on the command line. Instead, to run elasticsearch as a daemon, use
the `-d` flag:
[source,sh]
---------------
./bin/elasticsearch -d
---------------
* Command line settings can now be passed without the `-Des.` prefix, for
instance:
[source,sh]
---------------
./bin/elasticsearch --node.name=search_1 --cluster.name=production
---------------
* Elasticsearch on 64 bit Linux now uses <<mmapfs,`mmapfs`>> by default. Make
sure that you set <<setup-service,`MAX_MAP_COUNT`>> to a sufficiently high
number. The RPM and Debian packages default this value to `262144`.
* The RPM and Debian packages no longer start Elasticsearch by default.
* The `cluster.routing.allocation` settings (`disable_allocation`,
`disable_new_allocation` and `disable_replica_location`) have been
<<modules-cluster,replaced by the single setting>>:
[source,yaml]
---------------
cluster.routing.allocation.enable: all|primaries|new_primaries|none
---------------
== Stats and Info APIs
The <<cluster-state,`cluster_state`>>, <<cluster-nodes-info,`nodes_info`>>,
<<cluster-nodes-stats,`nodes_stats`>> and <<indices-stats,`indices_stats`>>
APIs have all been changed to make their format more RESTful and less clumsy.
For instance, if you just want the `nodes` section of the `cluster_state`,
instead of:
[source,sh]
---------------
GET /_cluster/state?filter_metadata&filter_routing_table&filter_blocks
---------------
you now use:
[source,sh]
---------------
GET /_cluster/state/nodes
---------------
Similarly for the `nodes_stats` API, if you want the `transport` and `http`
metrics only, instead of:
[source,sh]
---------------
GET /_nodes/stats?clear&transport&http
---------------
you now use:
[source,sh]
---------------
GET /_nodes/stats/transport,http
---------------
See the links above for full details.
== Indices APIs
The `mapping`, `alias`, `settings`, and `warmer` index APIs are all similar
but there are subtle differences in the order of the URL and the response
body. For instance, adding a mapping and a warmer look slightly different:
[source,sh]
---------------
PUT /{index}/{type}/_mapping
PUT /{index}/_warmer/{name}
---------------
These URLs have been unified as:
[source,sh]
---------------
PUT /{indices}/_mapping/{type}
PUT /{indices}/_alias/{name}
PUT /{indices}/_warmer/{name}
GET /{indices}/_mapping/{types}
GET /{indices}/_alias/{names}
GET /{indices}/_settings/{names}
GET /{indices}/_warmer/{names}
DELETE /{indices}/_mapping/{types}
DELETE /{indices}/_alias/{names}
DELETE /{indices}/_warmer/{names}
---------------
All of the `{indices}`, `{types}` and `{names}` parameters can be replaced by:
* `_all`, `*` or blank (ie left out altogether), all of which mean ``all''
* wildcards like `test*`
* comma-separated lists: `index_1,test_*`
The only exception is `DELETE` which doesn't accept blank (missing)
parameters. If you want to delete something, you should be specific.
Similarly, the return values for `GET` have been unified with the following
rules:
* Only return values that exist. If you try to `GET` a mapping which doesn't
exist, then the result will be an empty object: `{}`. We no longer throw a
`404` if the requested mapping/warmer/alias/setting doesn't exist.
* The response format always has the index name, then the section, then the
element name, for instance:
[source,json]
---------------
{
"my_index": {
"mappings": {
"my_type": {...}
}
}
}
---------------
+
This is a breaking change for the `get_mapping` API.
In the future we will also provide plural versions to allow putting multiple mappings etc in a single request.
See <<indices-put-mapping,`put-mapping`>>, <<indices-get-mapping,`get-
mapping`>>, <<indices-get-field-mapping,`get-field-mapping`>>,
<<indices-delete-mapping,`delete-mapping`>>,
<<indices-update-settings,`update-settings`>>, <<indices-get-settings,`get-settings`>>,
<<indices-warmers,`warmers`>>, and <<indices-aliases,`aliases`>> for more details.
== Index request
Previously a document could be indexed as itself, or wrapped in an outer
object which specified the `type` name:
[source,json]
---------------
PUT /my_index/my_type/1
{
"my_type": {
... doc fields ...
}
}
---------------
This led to some ambiguity when a document also included a field with the same
name as the `type`. We no longer accept the outer `type` wrapper, but this
behaviour can be reenabled on an index-by-index basis with the setting:
`index.mapping.allow_type_wrapper`.
== Search requests
While the `search` API takes a top-level `query` parameter, the
<<search-count,`count`>>, <<docs-delete-by-query,`delete-by-query`>> and
<<search-validate,`validate-query`>> requests expected the whole body to be a
query. These have been changed to all accept a top-level `query` parameter:
[source,json]
---------------
GET /_count
{
"query": {
"match": {
"title": "Interesting stuff"
}
}
}
---------------
Also, the top-level `filter` parameter in search has been renamed to
<<search-request-post-filter,`post_filter`>>, to indicate that it should not
be used as the primary way to filter search results (use a
<<query-dsl-filtered-query,`filtered` query>> instead), but only to filter
results AFTER facets/aggregations have been calculated.
This example counts the top colors in all matching docs, but only returns docs
with color `red`:
[source,json]
---------------
GET /_search
{
"query": {
"match_all": {}
},
"aggs": {
"colors": {
"terms": { "field": "color" }
}
},
"post_filter": {
"term": {
"color": "red"
}
}
}
---------------
== Multi-fields
Multi-fields are dead! Long live multi-fields! Well, the field type
`multi_field` has been removed. Instead, any of the core field types
(excluding `object` and `nested`) now accept a `fields` parameter. It's the
same thing, but nicer. Instead of:
[source,json]
---------------
"title": {
"type": "multi_field",
"fields": {
"title": { "type": "string" },
"raw": { "type": "string", "index": "not_analyzed" }
}
}
---------------
you can now write:
[source,json]
---------------
"title": {
"type": "string",
"fields": {
"raw": { "type": "string", "index": "not_analyzed" }
}
}
---------------
Existing multi-fields will be upgraded to the new format automatically.
== Stopwords
Previously, the <<analysis-standard-analyzer,`standard`>> and
<<analysis-pattern-analyzer,`pattern`>> analyzers used the list of English stopwords
by default, which caused some hard to debug indexing issues. Now they are set to
use the empty stopwords list (ie `_none_`) instead.
== Dates without years
When dates are specified without a year, for example: `Dec 15 10:00:00` they
are treated as dates in 2000 during indexing and range searches... except for
the upper included bound `lte` where they were treated as dates in 1970! Now,
all https://github.com/elasticsearch/elasticsearch/issues/4451[dates without years]
use `1970` as the default.
== Parameters
* Geo queries used to use `miles` as the default unit. And we
http://en.wikipedia.org/wiki/Mars_Climate_Orbiter[all know what
happened at NASA] because of that decision. The new default unit is
https://github.com/elasticsearch/elasticsearch/issues/4515[`meters`].
* For all queries that support _fuzziness_, the `min_similarity`, `fuzziness`
and `edit_distance` parameters have been unified as the single parameter
`fuzziness`. See <<fuzziness>> for details of accepted values.
* The `ignore_missing` parameter has been replaced by the `expand_wildcards`,
`ignore_unavailable` and `allow_no_indices` parameters, all of which have
sensible defaults. See <<multi-index,the multi-index docs>> for more.
* An index name (or pattern) is now required for destructive operations like
deleting indices:
[source,sh]
---------------
# v0.90 - delete all indices:
DELETE /
# v1.0 - delete all indices:
DELETE /_all
DELETE /*
---------------
+
Setting `action.destructive_requires_name` to `true` provides further safety
by disabling wildcard expansion on destructive actions.
== Return values
* The `ok` return value has been removed from all response bodies as it added
no useful information.
* The `found`, `not_found` and `exists` return values have been unified as
`found` on all relevant APIs.
* Field values, in response to the <<search-request-fields,`fields`>>
parameter, are now always returned as arrays. A field could have single or
multiple values, which meant that sometimes they were returned as scalars
and sometimes as arrays. By always returning arrays, this simplifies user
code. The only exception to this rule is when `fields` is used to retrieve
metadata like the `routing` value, which are always singular. Metadata
fields are always returned as scalars.
* Settings, like `index.analysis.analyzer.default` are now returned as proper
nested JSON objects, which makes them easier to work with programmatically:
[source,json]
---------------
{
"index": {
"analysis": {
"analyzer": {
"default": xxx
}
}
}
}
---------------
+
You can choose to return them in flattened format by passing `?flat_settings`
in the query string.
* The <<indices-analyze,`analyze`>> API no longer supports the text response
format, but does support JSON and YAML.
== Deprecations
* The `text` query has been removed. Use the
<<query-dsl-match-query,`match`>> query instead.
* The `field` query has been removed. Use the
<<query-dsl-query-string-query,`query_string`>> query instead.
* Per-document boosting with the <<mapping-boost-field,`_boost`>> field has
been removed. You can use the
<<function-score-instead-of-boost,`function_score`>> instead.

View File

@ -17,6 +17,8 @@ include::modules/network.asciidoc[]
include::modules/node.asciidoc[] include::modules/node.asciidoc[]
include::modules/tribe.asciidoc[]
include::modules/plugins.asciidoc[] include::modules/plugins.asciidoc[]
include::modules/scripting.asciidoc[] include::modules/scripting.asciidoc[]

View File

@ -12,70 +12,57 @@ handling nodes being added or removed.
The following settings may be used: The following settings may be used:
`cluster.routing.allocation.allow_rebalance`:: `cluster.routing.allocation.allow_rebalance`::
Allow to control when rebalancing will happen based on the total Allow to control when rebalancing will happen based on the total
state of all the indices shards in the cluster. `always`, state of all the indices shards in the cluster. `always`,
`indices_primaries_active`, and `indices_all_active` are allowed, `indices_primaries_active`, and `indices_all_active` are allowed,
defaulting to `indices_all_active` to reduce chatter during defaulting to `indices_all_active` to reduce chatter during
initial recovery. initial recovery.
`cluster.routing.allocation.cluster_concurrent_rebalance`:: `cluster.routing.allocation.cluster_concurrent_rebalance`::
Allow to control how many concurrent rebalancing of shards are Allow to control how many concurrent rebalancing of shards are
allowed cluster wide, and default it to `2`. allowed cluster wide, and default it to `2`.
`cluster.routing.allocation.node_initial_primaries_recoveries`:: `cluster.routing.allocation.node_initial_primaries_recoveries`::
Allow to control specifically the number of initial recoveries Allow to control specifically the number of initial recoveries
of primaries that are allowed per node. Since most times local of primaries that are allowed per node. Since most times local
gateway is used, those should be fast and we can handle more of gateway is used, those should be fast and we can handle more of
those per node without creating load. those per node without creating load.
`cluster.routing.allocation.node_concurrent_recoveries`:: `cluster.routing.allocation.node_concurrent_recoveries`::
How many concurrent recoveries are allowed to happen on a node. How many concurrent recoveries are allowed to happen on a node.
Defaults to `2`. Defaults to `2`.
added[1.0.0.RC1]
`cluster.routing.allocation.enable`:: `cluster.routing.allocation.enable`::
Controls shard allocation for all indices, by allowing specific Controls shard allocation for all indices, by allowing specific
kinds of shard to be allocated. Can be set to: kinds of shard to be allocated.
added[1.0.0.RC1,Replaces `cluster.routing.allocation.disable*`]
Can be set to:
* `all` (default) - Allows shard allocation for all kinds of shards. * `all` (default) - Allows shard allocation for all kinds of shards.
* `primaries` - Allows shard allocation only for primary shards. * `primaries` - Allows shard allocation only for primary shards.
* `new_primaries` - Allows shard allocation only for primary shards for new indices. * `new_primaries` - Allows shard allocation only for primary shards for new indices.
* `none` - No shard allocations of any kind are allowed for all indices. * `none` - No shard allocations of any kind are allowed for all indices.
`cluster.routing.allocation.disable_new_allocation`:: `cluster.routing.allocation.disable_new_allocation`::
Allows to disable new primary allocations. Note, this will prevent deprecated[1.0.0.RC1,Replaced by `cluster.routing.allocation.enable`]
allocations for newly created indices. This setting really make
sense when dynamically updating it using the cluster update
settings API. This setting has been deprecated in favour
for `cluster.routing.allocation.enable`.
`cluster.routing.allocation.disable_allocation`:: `cluster.routing.allocation.disable_allocation`::
Allows to disable either primary or replica allocation (does not deprecated[1.0.0.RC1,Replaced by `cluster.routing.allocation.enable`]
apply to newly created primaries, see `disable_new_allocation`
above). Note, a replica will still be promoted to primary if
one does not exist. This setting really make sense when
dynamically updating it using the cluster update settings API.
This setting has been deprecated in favour for `cluster.routing.allocation.enable`.
`cluster.routing.allocation.disable_replica_allocation`:: `cluster.routing.allocation.disable_replica_allocation`::
Allows to disable only replica allocation. Similar to the previous deprecated[1.0.0.RC1,Replaced by `cluster.routing.allocation.enable`]
setting, mainly make sense when using it dynamically using the
cluster update settings API. This setting has been deprecated in
favour for `cluster.routing.allocation.enable`.
`cluster.routing.allocation.same_shard.host`:: `cluster.routing.allocation.same_shard.host`::
Prevents that multiple instances of the same shard are allocated Prevents that multiple instances of the same shard are allocated
on a single host. Defaults to `false`. This setting only applies on a single host. Defaults to `false`. This setting only applies
if multiple nodes are started on the same machine. if multiple nodes are started on the same machine.
`indices.recovery.concurrent_streams`:: `indices.recovery.concurrent_streams`::
The number of streams to open (on a *node* level) to recover a The number of streams to open (on a *node* level) to recover a
shard from a peer shard. Defaults to `3`. shard from a peer shard. Defaults to `3`.
[float] [float]
[[allocation-awareness]] [[allocation-awareness]]
@ -182,8 +169,8 @@ set to `value1` and `value2` by setting
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
curl -XPUT localhost:9200/test/_settings -d '{ curl -XPUT localhost:9200/test/_settings -d '{
"index.routing.allocation.include.tag" : "value1,value2" "index.routing.allocation.include.tag" : "value1,value2"
}' }'
-------------------------------------------------- --------------------------------------------------
On the other hand, we can create an index that will be deployed on all On the other hand, we can create an index that will be deployed on all
@ -193,11 +180,11 @@ nodes except for nodes with a `tag` of value `value3` by setting
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
curl -XPUT localhost:9200/test/_settings -d '{ curl -XPUT localhost:9200/test/_settings -d '{
"index.routing.allocation.exclude.tag" : "value3" "index.routing.allocation.exclude.tag" : "value3"
}' }'
-------------------------------------------------- --------------------------------------------------
`index.routing.allocation.require.*` can be used to `index.routing.allocation.require.*` can be used to
specify a number of rules, all of which MUST match in order for a shard specify a number of rules, all of which MUST match in order for a shard
to be allocated to a node. This is in contrast to `include` which will to be allocated to a node. This is in contrast to `include` which will
include a node if ANY rule matches. include a node if ANY rule matches.
@ -206,7 +193,8 @@ The `include`, `exclude` and `require` values can have generic simple
matching wildcards, for example, `value1*`. A special attribute name matching wildcards, for example, `value1*`. A special attribute name
called `_ip` can be used to match on node ip values. In addition `_host` called `_ip` can be used to match on node ip values. In addition `_host`
attribute can be used to match on either the node's hostname or its ip attribute can be used to match on either the node's hostname or its ip
address. address. Similarly `_name` and `_id` attributes can be used to match on
node name and node id accordingly.
Obviously a node can have several attributes associated with it, and Obviously a node can have several attributes associated with it, and
both the attribute name and value are controlled in the setting. For both the attribute name and value are controlled in the setting. For
@ -228,7 +216,7 @@ curl -XPUT localhost:9200/test/_settings -d '{
"index.routing.allocation.include.group2" : "yyy", "index.routing.allocation.include.group2" : "yyy",
"index.routing.allocation.exclude.group3" : "zzz", "index.routing.allocation.exclude.group3" : "zzz",
"index.routing.allocation.require.group4" : "aaa" "index.routing.allocation.require.group4" : "aaa"
}' }'
-------------------------------------------------- --------------------------------------------------
The provided settings can also be updated in real time using the update The provided settings can also be updated in real time using the update
@ -245,6 +233,6 @@ address:
curl -XPUT localhost:9200/_cluster/settings -d '{ curl -XPUT localhost:9200/_cluster/settings -d '{
"transient" : { "transient" : {
"cluster.routing.allocation.exclude._ip" : "10.0.0.1" "cluster.routing.allocation.exclude._ip" : "10.0.0.1"
} }
}' }'
-------------------------------------------------- --------------------------------------------------

View File

@ -71,6 +71,18 @@ on all data and master nodes. The following settings are supported:
using size value notation, i.e. 1g, 10m, 5k. Defaults to `null` (unlimited chunk size). using size value notation, i.e. 1g, 10m, 5k. Defaults to `null` (unlimited chunk size).
[float]
===== Read-only URL Repository
The URL repository (`"type": "url"`) can be used as an alternative read-only way to access data created by the shared
file system repository. The URL specified in the `url` parameter should
point to the root of the shared file system repository. The following settings are supported:
[horizontal]
`url`:: Location of the snapshots. Mandatory.
`concurrent_streams`:: Throttles the number of streams (per node) performing snapshot operation. Defaults to `5`
[float] [float]
=== Snapshot === Snapshot
@ -101,7 +113,8 @@ supports <<search-multi-index-type,multi index syntax>>. The snapshot request al
`ignore_unavailable` option. Setting it to `true` will cause indices that do not exists to be ignored during snapshot `ignore_unavailable` option. Setting it to `true` will cause indices that do not exists to be ignored during snapshot
creation. By default, when `ignore_unavailable` option is not set and an index is missing the snapshot request will fail. creation. By default, when `ignore_unavailable` option is not set and an index is missing the snapshot request will fail.
By setting `include_global_state` to false it's possible to prevent the cluster global state to be stored as part of By setting `include_global_state` to false it's possible to prevent the cluster global state to be stored as part of
the snapshot. the snapshot. By default, entire snapshot will fail if one or more indices participating in the snapshot don't have
all primary shards available. This behaviour can be changed by setting `partial` to `true`.
The index snapshot process is incremental. In the process of making the index snapshot Elasticsearch analyses The index snapshot process is incremental. In the process of making the index snapshot Elasticsearch analyses
the list of the index files that are already stored in the repository and copies only files that were created or the list of the index files that are already stored in the repository and copies only files that were created or
@ -169,7 +182,7 @@ http://docs.oracle.com/javase/6/docs/api/java/util/regex/Matcher.html#appendRepl
----------------------------------- -----------------------------------
$ curl -XPOST "localhost:9200/_snapshot/my_backup/snapshot_1/_restore" -d '{ $ curl -XPOST "localhost:9200/_snapshot/my_backup/snapshot_1/_restore" -d '{
"indices": "index_1,index_2", "indices": "index_1,index_2",
"ignore_unavailable": "missing", "ignore_unavailable": "true",
"include_global_state": false, "include_global_state": false,
"rename_pattern": "index_(.)+", "rename_pattern": "index_(.)+",
"rename_replacement": "restored_index_$1" "rename_replacement": "restored_index_$1"

View File

@ -0,0 +1,60 @@
[[modules-tribe]]
== Tribe node
The _tribes_ feature allows a _tribe node_ to act as a federated client across
multiple clusters.
WARNING: This feature is EXPERIMENTAL -- use at your own risk.
The tribe node works by retrieving the cluster state from all connected
clusters and merging them into a global cluster state. With this information
at hand, it is able to perform read and write operations against the nodes in
all clusters as if they were local.
The `elasticsearch.yml` config file for a tribe node just needs to list the
clusters that should be joined, for instance:
[source,yaml]
--------------------------------
tribe:
t1: <1>
cluster.name: cluster_one
t2: <1>
cluster.name: cluster_two
--------------------------------
<1> `t1` and `t2` are arbitrary names representing the connection to each
cluster.
The example above configures connections to two clusters, named `t1` and `t2`
respectively. The tribe node will create a <<modules-node,node client>> to
connect each cluster using <<multicast,multicast discovery>> by default. Any
other settings for the connection can be configured under `tribe.{name}`, just
like the `cluster.name` in the example.
The merged global cluster state means that almost all operations work in the
same way as a single cluster: distributed search, suggest, percolation,
indexing, etc.
However, there are a few exceptions:
* The merged view cannot handle indices with the same name in multiple
clusters. It will pick one of them and discard the other.
* Master level read operations (eg <<cluster-state>>, <<cluster-health>>)
need to have the `local` flag set to `true` as the tribe node does not
have a single master node.
* Master level write operations (eg <<indices-create-index>>) are not
allowed. These should be performed on a single cluster.
The tribe node can be configured to block all write operations and all
metadata operations with:
[source,yaml]
--------------------------------
tribe:
blocks:
write: true
metadata: true
--------------------------------

View File

@ -1,7 +1,7 @@
[[setup-service-win]] [[setup-service-win]]
== Running as a Service on Windows == Running as a Service on Windows
Windows users can configure Elasticsearch to run as a service to run in the background or start automatically Windows users can configure Elasticsearch to run as a service to run in the background or start automatically
at startup without any user interaction. at startup without any user interaction.
This can be achieved through `service.bat` script under `bin/` folder which allows one to install, This can be achieved through `service.bat` script under `bin/` folder which allows one to install,
remove, manage or configure the service and potentially start and stop the service, all from the command-line. remove, manage or configure the service and potentially start and stop the service, all from the command-line.
@ -47,6 +47,7 @@ The service 'elasticsearch-service-x64' has been installed.
NOTE: While a JRE can be used for the Elasticsearch service, due to its use of a client VM (as oppose to a server JVM which NOTE: While a JRE can be used for the Elasticsearch service, due to its use of a client VM (as oppose to a server JVM which
offers better performance for long-running applications) its usage is discouraged and a warning will be issued. offers better performance for long-running applications) its usage is discouraged and a warning will be issued.
[float]
=== Customizing service settings === Customizing service settings
There are two ways to customize the service settings: There are two ways to customize the service settings:

View File

@ -3,6 +3,7 @@
In order to run elasticsearch as a service on your operating system, the provided packages try to make it as easy as possible for you to start and stop elasticsearch during reboot and upgrades. In order to run elasticsearch as a service on your operating system, the provided packages try to make it as easy as possible for you to start and stop elasticsearch during reboot and upgrades.
[float]
=== Linux === Linux
Currently our build automatically creates a debian package and an RPM package, which is available on the download page. The package itself does not have any dependencies, but you have to make sure that you installed a JDK. Currently our build automatically creates a debian package and an RPM package, which is available on the download page. The package itself does not have any dependencies, but you have to make sure that you installed a JDK.
@ -26,6 +27,7 @@ Each package features a configuration file, which allows you to set the followin
`ES_JAVA_OPTS`:: Any additional java options you may want to apply. This may be useful, if you need to set the `node.name` property, but do not want to change the `elasticsearch.yml` configuration file, because it is distributed via a provisioning system like puppet or chef. Example: `ES_JAVA_OPTS="-Des.node.name=search-01"` `ES_JAVA_OPTS`:: Any additional java options you may want to apply. This may be useful, if you need to set the `node.name` property, but do not want to change the `elasticsearch.yml` configuration file, because it is distributed via a provisioning system like puppet or chef. Example: `ES_JAVA_OPTS="-Des.node.name=search-01"`
`RESTART_ON_UPGRADE`:: Configure restart on package upgrade, defaults to `false`. This means you will have to restart your elasticsearch instance after installing a package manually. The reason for this is to ensure, that upgrades in a cluster do not result in a continouos shard reallocation resulting in high network traffic and reducing the response times of your cluster. `RESTART_ON_UPGRADE`:: Configure restart on package upgrade, defaults to `false`. This means you will have to restart your elasticsearch instance after installing a package manually. The reason for this is to ensure, that upgrades in a cluster do not result in a continouos shard reallocation resulting in high network traffic and reducing the response times of your cluster.
[float]
==== Debian/Ubuntu ==== Debian/Ubuntu
The debian package ships with everything you need as it uses standard debian tools like update `update-rc.d` to define the runlevels it runs on. The init script is placed at `/etc/init.d/elasticsearch` is you would expect it. The configuration file is placed at `/etc/default/elasticsearch`. The debian package ships with everything you need as it uses standard debian tools like update `update-rc.d` to define the runlevels it runs on. The init script is placed at `/etc/init.d/elasticsearch` is you would expect it. The configuration file is placed at `/etc/default/elasticsearch`.
@ -38,6 +40,7 @@ sudo update-rc.d elasticsearch defaults 95 10
sudo /etc/init.d/elasticsearch start sudo /etc/init.d/elasticsearch start
-------------------------------------------------- --------------------------------------------------
[float]
===== Installing the oracle JDK ===== Installing the oracle JDK
The usual recommendation is to run the Oracle JDK with elasticsearch. However Ubuntu and Debian only ship the OpenJDK due to license issues. You can easily install the oracle installer package though. In case you are missing the `add-apt-repository` command under Debian GNU/Linux, make sure have at least Debian Wheezy and the package `python-software-properties` installed The usual recommendation is to run the Oracle JDK with elasticsearch. However Ubuntu and Debian only ship the OpenJDK due to license issues. You can easily install the oracle installer package though. In case you are missing the `add-apt-repository` command under Debian GNU/Linux, make sure have at least Debian Wheezy and the package `python-software-properties` installed
@ -53,8 +56,10 @@ java -version
The last command should verify a successful installation of the Oracle JDK. The last command should verify a successful installation of the Oracle JDK.
==== RPM based distributions [float]
==== RPM based distributions
[float]
===== Using chkconfig ===== Using chkconfig
Some RPM based distributions are using `chkconfig` to enable and disable services. The init script is located at `/etc/init.d/elasticsearch`, where as the configuration file is placed at `/etc/sysconfig/elasticsearch`. Like the debian package the RPM package is not started by default after installation, you have to do this manually by entering the following commands Some RPM based distributions are using `chkconfig` to enable and disable services. The init script is located at `/etc/init.d/elasticsearch`, where as the configuration file is placed at `/etc/sysconfig/elasticsearch`. Like the debian package the RPM package is not started by default after installation, you have to do this manually by entering the following commands
@ -66,6 +71,7 @@ sudo service elasticsearch start
-------------------------------------------------- --------------------------------------------------
[float]
===== Using systemd ===== Using systemd
Distributions like SuSe do not use the `chkconfig` tool to register services, but rather `systemd` and its command `/bin/systemctl` to start and stop services (at least in newer versions, otherwise use the `chkconfig` commands above). The configuration file is also placed at `/etc/sysconfig/elasticsearch`. After installing the RPM, you have to change the systemd configuration and then start up elasticsearch Distributions like SuSe do not use the `chkconfig` tool to register services, but rather `systemd` and its command `/bin/systemctl` to start and stop services (at least in newer versions, otherwise use the `chkconfig` commands above). The configuration file is also placed at `/etc/sysconfig/elasticsearch`. After installing the RPM, you have to change the systemd configuration and then start up elasticsearch

View File

@ -6,6 +6,7 @@ We also have repositories available for APT and YUM based distributions.
We have split the major versions in separate urls to avoid accidental upgrades across major versions. We have split the major versions in separate urls to avoid accidental upgrades across major versions.
For all 0.90.x releases use 0.90 as version number, for 1.0.x use 1.0, etc. For all 0.90.x releases use 0.90 as version number, for 1.0.x use 1.0, etc.
[float]
=== APT === APT
Download and install the Public Signing Key Download and install the Public Signing Key
@ -25,6 +26,7 @@ deb http://packages.elasticsearch.org/elasticsearch/0.90/debian stable main
Run apt-get update and the repository is ready for use. Run apt-get update and the repository is ready for use.
[float]
=== YUM === YUM
Download and install the Public Signing Key Download and install the Public Signing Key

View File

@ -6,7 +6,7 @@
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.elasticsearch</groupId> <groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch</artifactId> <artifactId>elasticsearch</artifactId>
<version>1.0.0.RC1-SNAPSHOT</version> <version>1.0.0.RC1</version>
<packaging>jar</packaging> <packaging>jar</packaging>
<description>Elasticsearch - Open Source, Distributed, RESTful Search Engine</description> <description>Elasticsearch - Open Source, Distributed, RESTful Search Engine</description>
<inceptionYear>2009</inceptionYear> <inceptionYear>2009</inceptionYear>

View File

@ -4,17 +4,17 @@
"methods": ["DELETE"], "methods": ["DELETE"],
"url": { "url": {
"path": "/{index}/_alias/{name}", "path": "/{index}/_alias/{name}",
"paths": ["/{index}/_alias/{name}"], "paths": ["/{index}/_alias/{name}", "/{index}/_aliases/{name}"],
"parts": { "parts": {
"index": { "index": {
"type" : "string", "type" : "list",
"required" : true, "required" : true,
"description" : "The name of the index with an alias" "description" : "A comma-separated list of index names (supports wildcards); use `_all` for all indices"
}, },
"name": { "name": {
"type" : "string", "type" : "list",
"required" : true, "required" : true,
"description" : "The name of the alias to be deleted" "description" : "A comma-separated list of aliases to delete (supports wildcards); use `_all` to delete all aliases for the specified indices."
} }
}, },
"params": { "params": {

View File

@ -4,17 +4,17 @@
"methods": ["DELETE"], "methods": ["DELETE"],
"url": { "url": {
"path": "/{index}/{type}/_mapping", "path": "/{index}/{type}/_mapping",
"paths": ["/{index}/{type}/_mapping", "/{index}/{type}"], "paths": ["/{index}/{type}/_mapping", "/{index}/{type}", "/{index}/_mapping/{type}", "/{index}/{type}/_mappings", "/{index}/_mappings/{type}"],
"parts": { "parts": {
"index": { "index": {
"type" : "list", "type" : "list",
"required" : true, "required" : true,
"description" : "A comma-separated list of index names; use `_all` for all indices" "description" : "A comma-separated list of index names (supports wildcards); use `_all` for all indices"
}, },
"type": { "type": {
"type" : "string", "type" : "list",
"required" : true, "required" : true,
"description" : "The name of the document type to delete" "description" : "A comma-separated list of document types to delete (supports wildcards); use `_all` to delete all document types in the specified indices."
} }
}, },
"params": { "params": {

View File

@ -3,27 +3,27 @@
"documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-warmers.html", "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-warmers.html",
"methods": ["DELETE"], "methods": ["DELETE"],
"url": { "url": {
"path": "/{index}/_warmer", "path": "/{index}/_warmer/{name}",
"paths": ["/{index}/_warmer", "/{index}/_warmer/{name}", "/{index}/{type}/_warmer/{name}"], "paths": ["/{index}/_warmer", "/{index}/_warmer/{name}", "/{index}/_warmers", "/{index}/_warmers/{name}"],
"parts": { "parts": {
"index": { "index": {
"type" : "list", "type" : "list",
"required" : true, "required" : true,
"description" : "A comma-separated list of index names to register warmer for; use `_all` or empty string to perform the operation on all indices" "description" : "A comma-separated list of index names to delete warmers from (supports wildcards); use `_all` to perform the operation on all indices."
}, },
"name" : { "name" : {
"type" : "string",
"description" : "The name of the warmer (supports wildcards); leave empty to delete all warmers"
},
"type": {
"type" : "list", "type" : "list",
"description" : "A comma-separated list of document types to register warmer for; use `_all` or empty string to perform the operation on all types" "description" : "A comma-separated list of warmer names to delete (supports wildcards); use `_all` to delete all warmers in the specified indices. You must specify a name either in the uri or in the parameters."
} }
}, },
"params": { "params": {
"master_timeout": { "master_timeout": {
"type" : "time", "type" : "time",
"description" : "Specify timeout for connection to master" "description" : "Specify timeout for connection to master"
},
"name" : {
"type" : "list",
"description" : "A comma-separated list of warmer names to delete (supports wildcards); use `_all` to delete all warmers in the specified indices. You must specify a name either in the uri or in the parameters."
} }
} }
}, },

View File

@ -3,8 +3,8 @@
"documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-aliases.html", "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-aliases.html",
"methods": ["GET"], "methods": ["GET"],
"url": { "url": {
"path": "/_alias/{name}", "path": "/_alias/",
"paths": ["/_alias/{name}", "/{index}/_alias/{name}", "/{index}/_alias"], "paths": [ "/_alias", "/_alias/{name}", "/{index}/_alias/{name}", "/{index}/_alias"],
"parts": { "parts": {
"index": { "index": {
"type" : "list", "type" : "list",

View File

@ -4,11 +4,15 @@
"methods": ["GET"], "methods": ["GET"],
"url": { "url": {
"path": "/_aliases", "path": "/_aliases",
"paths": ["/_aliases", "/{index}/_aliases"], "paths": ["/_aliases", "/{index}/_aliases", "/{index}/_aliases/{name}", "/_aliases/{name}" ],
"parts": { "parts": {
"index": { "index": {
"type" : "list", "type" : "list",
"description" : "A comma-separated list of index names to filter aliases" "description" : "A comma-separated list of index names to filter aliases"
},
"name": {
"type" : "list",
"description" : "A comma-separated list of alias names to filter"
} }
}, },
"params": { "params": {

View File

@ -4,7 +4,7 @@
"methods": ["GET"], "methods": ["GET"],
"url": { "url": {
"path": "/_mapping/field/{field}", "path": "/_mapping/field/{field}",
"paths": ["/_mapping/field/{field}", "/{index}/_mapping/field/{field}", "/{index}/{type}/_mapping/field/{field}"], "paths": ["/_mapping/field/{field}", "/{index}/_mapping/field/{field}", "/_mapping/{type}/field/{field}", "/{index}/_mapping/{type}/field/{field}"],
"parts": { "parts": {
"index": { "index": {
"type" : "list", "type" : "list",

View File

@ -4,7 +4,7 @@
"methods": ["GET"], "methods": ["GET"],
"url": { "url": {
"path": "/_mapping", "path": "/_mapping",
"paths": ["/_mapping", "/{index}/_mapping", "/{index}/{type}/_mapping"], "paths": ["/_mapping", "/{index}/_mapping", "/_mapping/{type}", "/{index}/_mapping/{type}"],
"parts": { "parts": {
"index": { "index": {
"type" : "list", "type" : "list",

View File

@ -4,15 +4,15 @@
"methods": ["GET"], "methods": ["GET"],
"url": { "url": {
"path": "/_settings", "path": "/_settings",
"paths": ["/_settings", "/{index}/_settings", "/{index}/{prefix}/_settings"], "paths": ["/_settings", "/{index}/_settings", "/{index}/_settings/{name}", "/_settings/{name}"],
"parts": { "parts": {
"index": { "index": {
"type" : "list", "type" : "list",
"description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices" "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
}, },
"prefix": { "name": {
"type" : "string", "type" : "string",
"description" : "The prefix all settings must have in order to be included" "description" : "The name of the settings that should be included"
} }
}, },
"params": { "params": {

View File

@ -3,8 +3,8 @@
"documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-warmers.html", "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-warmers.html",
"methods": ["GET"], "methods": ["GET"],
"url": { "url": {
"path": "/{index}/_warmer", "path": "/_warmer",
"paths": ["/{index}/_warmer", "/{index}/_warmer/{name}", "/{index}/{type}/_warmer/{name}"], "paths": [ "/_warmer", "/{index}/_warmer", "/{index}/_warmer/{name}", "/_warmer/{name}", "/{index}/{type}/_warmer/{name}"],
"parts": { "parts": {
"index": { "index": {
"type" : "list", "type" : "list",

View File

@ -1,15 +1,14 @@
{ {
"indices.put_alias": { "indices.put_alias": {
"documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-aliases.html", "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-aliases.html",
"methods": ["PUT"], "methods": ["PUT", "POST"],
"url": { "url": {
"path": "/{index}/_alias/{name}", "path": "/{index}/_alias/{name}",
"paths": ["/{index}/_alias/{name}", "/_alias/{name}", "/{index}/_alias", "/_alias"], "paths": ["/{index}/_alias/{name}", "/_alias/{name}", "/{index}/_aliases/{name}", "/_aliases/{name}"],
"parts": { "parts": {
"index": { "index": {
"type" : "string", "type" : "list",
"required" : true, "description" : "A comma-separated list of index names the alias should point to (supports wildcards); use `_all` or omit to perform the operation on all indices."
"description" : "The name of the index with an alias"
}, },
"name": { "name": {
"type" : "string", "type" : "string",

View File

@ -4,12 +4,11 @@
"methods": ["PUT", "POST"], "methods": ["PUT", "POST"],
"url": { "url": {
"path": "/{index}/{type}/_mapping", "path": "/{index}/{type}/_mapping",
"paths": ["/{index}/{type}/_mapping"], "paths": ["/{index}/{type}/_mapping", "/{index}/_mapping/{type}", "/_mapping/{type}", "/{index}/{type}/_mappings", "/{index}/_mappings/{type}", "/_mappings/{type}"],
"parts": { "parts": {
"index": { "index": {
"type" : "list", "type" : "list",
"required" : true, "description" : "A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices."
"description" : "A comma-separated list of index names; use `_all` to perform the operation on all indices"
}, },
"type": { "type": {
"type" : "string", "type" : "string",

View File

@ -1,15 +1,14 @@
{ {
"indices.put_warmer": { "indices.put_warmer": {
"documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-warmers.html", "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-warmers.html",
"methods": ["PUT"], "methods": ["PUT", "POST"],
"url": { "url": {
"path": "/{index}/_warmer/{name}", "path": "/{index}/_warmer/{name}",
"paths": ["/{index}/_warmer/{name}", "/{index}/{type}/_warmer/{name}"], "paths": ["/_warmer/{name}", "/{index}/_warmer/{name}", "/{index}/{type}/_warmer/{name}", "/_warmers/{name}", "/{index}/_warmers/{name}", "/{index}/{type}/_warmers/{name}"],
"parts": { "parts": {
"index": { "index": {
"type" : "list", "type" : "list",
"required" : true, "description" : "A comma-separated list of index names to register the warmer for; use `_all` or omit to perform the operation on all indices"
"description" : "A comma-separated list of index names to register the warmer for; use `_all` or empty string to perform the operation on all indices"
}, },
"name": { "name": {
"type" : "string", "type" : "string",

View File

@ -25,7 +25,8 @@
name: testali name: testali
- do: - do:
catch: missing
indices.get_alias: indices.get_alias:
index: testind index: testind
name: testali name: testali
- match: { '': {}}

View File

@ -0,0 +1,225 @@
---
setup:
- do:
indices.create:
index: test_index1
- do:
indices.create:
index: test_index2
- do:
indices.create:
index: foo
- do:
indices.put_alias:
name: alias1
body:
routing: "routing value"
- do:
indices.put_alias:
name: alias2
body:
routing: "routing value"
---
"check setup":
- do:
indices.get_alias:
name: alias1
- match: {test_index1.aliases.alias1.search_routing: "routing value"}
- match: {test_index2.aliases.alias1.search_routing: "routing value"}
- match: {foo.aliases.alias1.search_routing: "routing value"}
- do:
indices.get_alias:
name: alias2
- match: {test_index1.aliases.alias2.search_routing: "routing value"}
- match: {test_index2.aliases.alias2.search_routing: "routing value"}
- match: {foo.aliases.alias2.search_routing: "routing value"}
---
"check delete with _all index":
- do:
indices.delete_alias:
index: _all
name: alias1
- do:
catch: missing
indices.get_alias:
name: alias1
- do:
indices.get_alias:
name: alias2
- match: {test_index1.aliases.alias2.search_routing: "routing value"}
- match: {test_index2.aliases.alias2.search_routing: "routing value"}
- match: {foo.aliases.alias2.search_routing: "routing value"}
---
"check delete with * index":
- do:
indices.delete_alias:
index: "*"
name: alias1
- do:
catch: missing
indices.get_alias:
name: alias1
- do:
indices.get_alias:
name: alias2
- match: {test_index1.aliases.alias2.search_routing: "routing value"}
- match: {test_index2.aliases.alias2.search_routing: "routing value"}
- match: {foo.aliases.alias2.search_routing: "routing value"}
---
"check delete with index list":
- do:
indices.delete_alias:
index: "test_index1,test_index2"
name: alias1
- do:
indices.get_alias:
name: alias1
- match: {foo.aliases.alias1.search_routing: "routing value"}
- is_false: test_index1
- is_false: test_index2
- do:
indices.get_alias:
name: alias2
- match: {test_index1.aliases.alias2.search_routing: "routing value"}
- match: {test_index2.aliases.alias2.search_routing: "routing value"}
- match: {foo.aliases.alias2.search_routing: "routing value"}
---
"check delete with prefix* index":
- do:
indices.delete_alias:
index: "test_*"
name: alias1
- do:
indices.get_alias:
name: alias1
- match: {foo.aliases.alias1.search_routing: "routing value"}
- is_false: test_index1
- is_false: test_index2
- do:
indices.get_alias:
name: alias2
- match: {test_index1.aliases.alias2.search_routing: "routing value"}
- match: {test_index2.aliases.alias2.search_routing: "routing value"}
- match: {foo.aliases.alias2.search_routing: "routing value"}
---
"check delete with index list and * aliases":
- do:
indices.delete_alias:
index: "test_index1,test_index2"
name: "*"
- do:
indices.get_alias:
name: alias1
- match: {foo.aliases.alias1.search_routing: "routing value"}
- is_false: test_index1
- is_false: test_index2
- do:
indices.get_alias:
name: alias2
- match: {foo.aliases.alias2.search_routing: "routing value"}
- is_false: test_index1
- is_false: test_index2
---
"check delete with index list and _all aliases":
- do:
indices.delete_alias:
index: "test_index1,test_index2"
name: _all
- do:
indices.get_alias:
name: alias1
- match: {foo.aliases.alias1.search_routing: "routing value"}
- is_false: test_index1
- is_false: test_index2
- do:
indices.get_alias:
name: alias2
- match: {foo.aliases.alias2.search_routing: "routing value"}
- is_false: test_index1
- is_false: test_index2
---
"check delete with index list and wildcard aliases":
- do:
indices.delete_alias:
index: "test_index1,test_index2"
name: "*1"
- do:
indices.get_alias:
name: alias1
- match: {foo.aliases.alias1.search_routing: "routing value"}
- is_false: test_index1
- is_false: test_index2
- do:
indices.get_alias:
name: alias2
- match: {test_index1.aliases.alias2.search_routing: "routing value"}
- match: {test_index2.aliases.alias2.search_routing: "routing value"}
- match: {foo.aliases.alias2.search_routing: "routing value"}
---
"check 404 on no matching alias":
- do:
catch: missing
indices.delete_alias:
index: "*"
name: "non_existent"
- do:
catch: missing
indices.delete_alias:
index: "non_existent"
name: "alias1"
---
"check delete with blank index and blank alias":
- do:
catch: param
indices.delete_alias:
name: "alias1"
- do:
catch: param
indices.delete_alias:
index: "test_index1"

View File

@ -0,0 +1,286 @@
setup:
- do:
indices.create:
index: test_index1
body:
mappings: { test_type1: { }}
- do:
indices.create:
index: test_index2
body:
mappings: { test_type2: { }}
- do:
indices.create:
index: foo
body:
mappings: { test_type2: { }}
---
"delete with _all index":
- do:
indices.delete_mapping:
index: _all
type: test_type2
- do:
indices.exists_type:
index: test_index1
type: test_type1
- is_true: ''
- do:
indices.exists_type:
index: test_index2
type: test_type2
- is_false: ''
- do:
indices.exists_type:
index: foo
type: test_type2
- is_false: ''
---
"delete with * index":
- do:
indices.delete_mapping:
index: '*'
type: test_type2
- do:
indices.exists_type:
index: test_index1
type: test_type1
- is_true: ''
- do:
indices.exists_type:
index: test_index2
type: test_type2
- is_false: ''
- do:
indices.exists_type:
index: foo
type: test_type2
- is_false: ''
---
"delete with prefix* index":
- do:
indices.delete_mapping:
index: test*
type: test_type2
- do:
indices.exists_type:
index: test_index1
type: test_type1
- is_true: ''
- do:
indices.exists_type:
index: test_index2
type: test_type2
- is_false: ''
- do:
indices.exists_type:
index: foo
type: test_type2
- is_true: ''
---
"delete with list of indices":
- do:
indices.delete_mapping:
index: test_index1,test_index2
type: test_type2
- do:
indices.exists_type:
index: test_index1
type: test_type1
- is_true: ''
- do:
indices.exists_type:
index: test_index2
type: test_type2
- is_false: ''
- do:
indices.exists_type:
index: foo
type: test_type2
- is_true: ''
---
"delete with index list and _all type":
- do:
indices.delete_mapping:
index: test_index1,test_index2
type: _all
- do:
indices.exists_type:
index: test_index1
type: test_type1
- is_false: ''
- do:
indices.exists_type:
index: test_index2
type: test_type2
- is_false: ''
- do:
indices.exists_type:
index: foo
type: test_type2
- is_true: ''
---
"delete with index list and * type":
- do:
indices.delete_mapping:
index: test_index1,test_index2
type: '*'
- do:
indices.exists_type:
index: test_index1
type: test_type1
- is_false: ''
- do:
indices.exists_type:
index: test_index2
type: test_type2
- is_false: ''
- do:
indices.exists_type:
index: foo
type: test_type2
- is_true: ''
---
"delete with index list and prefix* type":
- do:
indices.delete_mapping:
index: test_index1,test_index2
type: '*2'
- do:
indices.exists_type:
index: test_index1
type: test_type1
- is_true: ''
- do:
indices.exists_type:
index: test_index2
type: test_type2
- is_false: ''
- do:
indices.exists_type:
index: foo
type: test_type2
- is_true: ''
---
"delete with index list and list of types":
- do:
indices.delete_mapping:
index: test_index1,test_index2
type: test_type1,test_type2
- do:
indices.exists_type:
index: test_index1
type: test_type1
- is_false: ''
- do:
indices.exists_type:
index: test_index2
type: test_type2
- is_false: ''
- do:
indices.exists_type:
index: foo
type: test_type2
- is_true: ''
---
"check 404 on no matching type":
- do:
catch: missing
indices.delete_mapping:
index: "*"
type: "non_existent"
- do:
catch: missing
indices.delete_mapping:
index: "non_existent"
type: "test_type1"
---
"check delete with blank index and blank alias":
- do:
catch: param
indices.delete_alias:
name: "alias1"
- do:
catch: param
indices.delete_alias:
index: "test_index1"
---
"check delete with blank index and blank type":
- do:
catch: param
indices.delete_mapping:
name: "test_type1"
- do:
catch: param
indices.delete_mapping:
index: "test_index1"

View File

@ -0,0 +1,204 @@
setup:
- do:
indices.create:
index: test_index1
- do:
indices.create:
index: test_index2
- do:
indices.create:
index: foo
- do:
indices.put_warmer:
index: "test_index1,test_index2,foo"
name: test_warmer1
body:
query:
match_all: {}
- do:
indices.put_warmer:
index: "test_index1,test_index2,foo"
name: test_warmer2
body:
query:
match_all: {}
---
"Check setup":
- do:
indices.get_warmer: { index: _all, name: '*' }
- match: {test_index1.warmers.test_warmer1.source.query.match_all: {}}
- match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
- match: {test_index2.warmers.test_warmer1.source.query.match_all: {}}
- match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
- match: {foo.warmers.test_warmer1.source.query.match_all: {}}
- match: {foo.warmers.test_warmer2.source.query.match_all: {}}
---
"check delete with _all index":
- do:
indices.delete_warmer:
index: _all
name: test_warmer1
- do:
indices.get_warmer: {}
- match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
- match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
- match: {foo.warmers.test_warmer2.source.query.match_all: {}}
---
"check delete with * index":
- do:
indices.delete_warmer:
index: "*"
name: test_warmer1
- do:
indices.get_warmer: {}
- match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
- match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
- match: {foo.warmers.test_warmer2.source.query.match_all: {}}
---
"check delete with index list":
- do:
indices.delete_warmer:
index: "test_index1,test_index2"
name: test_warmer1
- do:
indices.get_warmer: { index: _all, name: 'test_warmer1' }
- match: {foo.warmers.test_warmer1.source.query.match_all: {}}
- is_false: test_index1
- is_false: test_index2
- do:
indices.get_warmer: { index: _all, name: 'test_warmer2' }
- match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
- match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
- match: {foo.warmers.test_warmer2.source.query.match_all: {}}
---
"check delete with prefix* index":
- do:
indices.delete_warmer:
index: "test_*"
name: test_warmer1
- do:
indices.get_warmer: { index: _all, name: 'test_warmer1' }
- match: {foo.warmers.test_warmer1.source.query.match_all: {}}
- is_false: test_index1
- is_false: test_index2
- do:
indices.get_warmer: { index: _all, name: 'test_warmer2' }
- match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
- match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
- match: {foo.warmers.test_warmer2.source.query.match_all: {}}
---
"check delete with index list and * warmers":
- do:
indices.delete_warmer:
index: "test_index1,test_index2"
name: "*"
- do:
indices.get_warmer: { index: _all, name: 'test_warmer1' }
- match: {foo.warmers.test_warmer1.source.query.match_all: {}}
- is_false: test_index1
- is_false: test_index2
- do:
indices.get_warmer: { index: _all, name: 'test_warmer2' }
- match: {foo.warmers.test_warmer2.source.query.match_all: {}}
- is_false: test_index1
- is_false: test_index2
---
"check delete with index list and _all warmers":
- do:
indices.delete_warmer:
index: "test_index1,test_index2"
name: _all
- do:
indices.get_warmer: { index: _all, name: 'test_warmer1' }
- match: {foo.warmers.test_warmer1.source.query.match_all: {}}
- is_false: test_index1
- is_false: test_index2
- do:
indices.get_warmer: { index: _all, name: 'test_warmer2' }
- match: {foo.warmers.test_warmer2.source.query.match_all: {}}
- is_false: test_index1
- is_false: test_index2
---
"check delete with index list and wildcard warmers":
- do:
indices.delete_warmer:
index: "test_index1,test_index2"
name: "*1"
- do:
indices.get_warmer: { index: _all, name: 'test_warmer1' }
- match: {foo.warmers.test_warmer1.source.query.match_all: {}}
- is_false: test_index1
- is_false: test_index2
- do:
indices.get_warmer: { index: _all, name: 'test_warmer2' }
- match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
- match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
- match: {foo.warmers.test_warmer2.source.query.match_all: {}}
---
"check 404 on no matching test_warmer":
- do:
catch: missing
indices.delete_warmer:
index: "*"
name: "non_existent"
- do:
catch: missing
indices.delete_warmer:
index: "non_existent"
name: "test_warmer1"
---
"check delete with blank index and blank test_warmer":
- do:
catch: param
indices.delete_warmer:
name: "test_warmer1"
- do:
catch: param
indices.delete_warmer:
index: "test_index1"

View File

@ -0,0 +1,211 @@
---
setup:
- do:
indices.create:
index: test_index
- do:
indices.create:
index: test_index_2
- do:
indices.put_alias:
index: test_index
name: test_alias
- do:
indices.put_alias:
index: test_index
name: test_blias
- do:
indices.put_alias:
index: test_index_2
name: test_alias
- do:
indices.put_alias:
index: test_index_2
name: test_blias
---
"Get all aliases via /_alias":
- do:
indices.get_alias: {}
- match: {test_index.aliases.test_alias: {}}
- match: {test_index.aliases.test_blias: {}}
- match: {test_index_2.aliases.test_alias: {}}
- match: {test_index_2.aliases.test_blias: {}}
---
"Get all aliases via /{index}/_alias/":
- do:
indices.get_alias:
index: test_index
- match: {test_index.aliases.test_alias: {}}
- match: {test_index.aliases.test_blias: {}}
- is_false: test_index_2
---
"Get specific alias via /{index}/_alias/{name}":
- do:
indices.get_alias:
index: test_index
name: test_alias
- match: {test_index.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2
---
"Get aliases via /{index}/_alias/_all":
- do:
indices.get_alias:
index: test_index
name: _all
- match: {test_index.aliases.test_alias: {}}
- match: {test_index.aliases.test_blias: {}}
- is_false: test_index_2
---
"Get aliases via /{index}/_alias/*":
- do:
indices.get_alias:
index: test_index
name: '*'
- match: {test_index.aliases.test_alias: {}}
- match: {test_index.aliases.test_blias: {}}
- is_false: test_index_2
---
"Get aliases via /{index}/_alias/prefix*":
- do:
indices.get_alias:
index: test_index
name: 'test_a*'
- match: {test_index.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2
---
"Get aliases via /{index}/_alias/name,name":
- do:
indices.get_alias:
index: test_index
name: 'test_alias,test_blias'
- match: {test_index.aliases.test_alias: {}}
- match: {test_index.aliases.test_blias: {}}
- is_false: test_index_2
---
"Get aliases via /_alias/{name}":
- do:
indices.get_alias:
name: test_alias
- match: {test_index.aliases.test_alias: {}}
- match: {test_index_2.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2.aliases.test_blias
---
"Get aliases via /_all/_alias/{name}":
- do:
indices.get_alias:
index: _all
name: test_alias
- match: {test_index.aliases.test_alias: {}}
- match: {test_index_2.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2.aliases.test_blias
---
"Get aliases via /*/_alias/{name}":
- do:
indices.get_alias:
index: '*'
name: test_alias
- match: {test_index.aliases.test_alias: {}}
- match: {test_index_2.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2.aliases.test_blias
---
"Get aliases via /pref*/_alias/{name}":
- do:
indices.get_alias:
index: '*2'
name: test_alias
- match: {test_index_2.aliases.test_alias: {}}
- is_false: test_index.aliases.test_alias
- is_false: test_index.aliases.test_blias
- is_false: test_index_2.aliases.test_blias
---
"Get aliases via /name,name/_alias/{name}":
- do:
indices.get_alias:
index: test_index,test_index_2
name: test_alias
- match: {test_index.aliases.test_alias: {}}
- match: {test_index_2.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2.aliases.test_blias
---
"Non-existent alias on an existing index returns an empty body":
- do:
indices.get_alias:
index: test_index
name: non-existent
- match: { '': {}}
---
"Existent and non-existent alias returns just the existing":
- do:
indices.get_alias:
index: test_index
name: test_alias,non-existent
- match: {test_index.aliases.test_alias: {}}
- is_false: test_index.aliases.non-existent
---
"Getting alias on a non-existent index should return 404":
- do:
catch: missing
indices.get_alias:
index: non-existent
name: foo

View File

@ -0,0 +1,214 @@
---
setup:
- do:
indices.create:
index: test_index
- do:
indices.create:
index: test_index_2
- do:
indices.put_alias:
index: test_index
name: test_alias
- do:
indices.put_alias:
index: test_index
name: test_blias
- do:
indices.put_alias:
index: test_index_2
name: test_alias
- do:
indices.put_alias:
index: test_index_2
name: test_blias
---
"Get all aliases via /_aliases":
- do:
indices.get_aliases: {}
- match: {test_index.aliases.test_alias: {}}
- match: {test_index.aliases.test_blias: {}}
- match: {test_index_2.aliases.test_alias: {}}
- match: {test_index_2.aliases.test_blias: {}}
---
"Get all aliases via /{index}/_aliases/":
- do:
indices.get_aliases:
index: test_index
- match: {test_index.aliases.test_alias: {}}
- match: {test_index.aliases.test_blias: {}}
- is_false: test_index_2
---
"Get specific alias via /{index}/_aliases/{name}":
- do:
indices.get_aliases:
index: test_index
name: test_alias
- match: {test_index.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2
---
"Get aliases via /{index}/_aliases/_all":
- do:
indices.get_aliases:
index: test_index
name: _all
- match: {test_index.aliases.test_alias: {}}
- match: {test_index.aliases.test_blias: {}}
- is_false: test_index_2
---
"Get aliases via /{index}/_aliases/*":
- do:
indices.get_aliases:
index: test_index
name: '*'
- match: {test_index.aliases.test_alias: {}}
- match: {test_index.aliases.test_blias: {}}
- is_false: test_index_2
---
"Get aliases via /{index}/_aliases/prefix*":
- do:
indices.get_aliases:
index: test_index
name: 'test_a*'
- match: {test_index.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2
---
"Get aliases via /{index}/_aliases/name,name":
- do:
indices.get_aliases:
index: test_index
name: 'test_alias,test_blias'
- match: {test_index.aliases.test_alias: {}}
- match: {test_index.aliases.test_blias: {}}
- is_false: test_index_2
---
"Get aliases via /_aliases/{name}":
- do:
indices.get_aliases:
name: test_alias
- match: {test_index.aliases.test_alias: {}}
- match: {test_index_2.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2.aliases.test_blias
---
"Get aliases via /_all/_aliases/{name}":
- do:
indices.get_aliases:
index: _all
name: test_alias
- match: {test_index.aliases.test_alias: {}}
- match: {test_index_2.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2.aliases.test_blias
---
"Get aliases via /*/_aliases/{name}":
- do:
indices.get_aliases:
index: '*'
name: test_alias
- match: {test_index.aliases.test_alias: {}}
- match: {test_index_2.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2.aliases.test_blias
---
"Get aliases via /pref*/_aliases/{name}":
- do:
indices.get_aliases:
index: '*2'
name: test_alias
- match: {test_index_2.aliases.test_alias: {}}
- is_false: test_index.aliases.test_alias
- is_false: test_index.aliases.test_blias
- is_false: test_index_2.aliases.test_blias
---
"Get aliases via /name,name/_aliases/{name}":
- do:
indices.get_aliases:
index: test_index,test_index_2
name: test_alias
- match: {test_index.aliases.test_alias: {}}
- match: {test_index_2.aliases.test_alias: {}}
- is_false: test_index.aliases.test_blias
- is_false: test_index_2.aliases.test_blias
---
"Non-existent alias on an existing index returns matching indices":
- do:
indices.get_aliases:
index: test_index
name: non-existent
- match: { test_index.aliases: {}}
---
"Existent and non-existent alias returns just the existing":
- do:
indices.get_aliases:
index: test_index
name: test_alias,non-existent
- match: {test_index.aliases.test_alias: {}}
- is_false: test_index.aliases.non-existent
---
"Getting alias on a non-existent index should return 404":
- skip:
version: 1 - 999
reason: not implemented yet
- do:
catch: missing
indices.get_aliases:
index: non-existent
name: foo

View File

@ -17,7 +17,7 @@ setup:
indices.get_field_mapping: indices.get_field_mapping:
field: text field: text
- match: {test_index.test_type.text.mapping.text.type: string} - match: {test_index.mappings.test_type.text.mapping.text.type: string}
--- ---
"Get field mapping by index only": "Get field mapping by index only":
@ -26,7 +26,7 @@ setup:
index: test_index index: test_index
field: text field: text
- match: {test_index.test_type.text.mapping.text.type: string} - match: {test_index.mappings.test_type.text.mapping.text.type: string}
--- ---
"Get field mapping by type & field": "Get field mapping by type & field":
@ -37,7 +37,7 @@ setup:
type: test_type type: test_type
field: text field: text
- match: {test_index.test_type.text.mapping.text.type: string} - match: {test_index.mappings.test_type.text.mapping.text.type: string}
--- ---
"Get field mapping by type & field, with another field that doesn't exist": "Get field mapping by type & field, with another field that doesn't exist":
@ -48,8 +48,8 @@ setup:
type: test_type type: test_type
field: [ text , text1 ] field: [ text , text1 ]
- match: {test_index.test_type.text.mapping.text.type: string} - match: {test_index.mappings.test_type.text.mapping.text.type: string}
- is_false: test_index.test_type.text1 - is_false: test_index.mappings.test_type.text1
--- ---
"Get field mapping with include_defaults": "Get field mapping with include_defaults":
@ -61,5 +61,16 @@ setup:
field: text field: text
include_defaults: true include_defaults: true
- match: {test_index.test_type.text.mapping.text.type: string} - match: {test_index.mappings.test_type.text.mapping.text.type: string}
- match: {test_index.test_type.text.mapping.text.analyzer: default} - match: {test_index.mappings.test_type.text.mapping.text.analyzer: default}
---
"Get field mapping should work without index specifying type and field":
- do:
indices.get_field_mapping:
type: test_type
field: text
- match: {test_index.mappings.test_type.text.mapping.text.type: string}

View File

@ -23,6 +23,33 @@ setup:
type: string type: string
index_name: t3 index_name: t3
- do:
indices.create:
index: test_index_2
body:
mappings:
test_type_2:
properties:
t1:
type: string
t2:
type: string
obj:
path: just_name
properties:
t1:
type: string
i_t1:
type: string
index_name: t1
i_t3:
type: string
index_name: t3
- do:
cluster.health:
wait_for_status: yellow
--- ---
"Get field mapping with * for fields": "Get field mapping with * for fields":
@ -30,43 +57,92 @@ setup:
indices.get_field_mapping: indices.get_field_mapping:
field: "*" field: "*"
- match: {test_index.test_type.t1.full_name: t1 } - match: {test_index.mappings.test_type.t1.full_name: t1 }
- match: {test_index.test_type.t2.full_name: t2 } - match: {test_index.mappings.test_type.t2.full_name: t2 }
- match: {test_index.test_type.obj\.t1.full_name: obj.t1 } - match: {test_index.mappings.test_type.obj\.t1.full_name: obj.t1 }
- match: {test_index.test_type.obj\.i_t1.full_name: obj.i_t1 } - match: {test_index.mappings.test_type.obj\.i_t1.full_name: obj.i_t1 }
- match: {test_index.test_type.obj\.i_t3.full_name: obj.i_t3 } - match: {test_index.mappings.test_type.obj\.i_t3.full_name: obj.i_t3 }
--- ---
"Get field mapping with t* for fields": "Get field mapping with t* for fields":
- do: - do:
indices.get_field_mapping: indices.get_field_mapping:
index: test_index
field: "t*" field: "t*"
# i_t1 matches the pattern using its index name, but t1 already means a full name # i_t1 matches the pattern using its index name, but t1 already means a full name
# of a field and thus takes precedence. # of a field and thus takes precedence.
- match: {test_index.test_type.t1.full_name: t1 } - match: {test_index.mappings.test_type.t1.full_name: t1 }
- match: {test_index.test_type.t2.full_name: t2 } - match: {test_index.mappings.test_type.t2.full_name: t2 }
- match: {test_index.test_type.t3.full_name: obj.i_t3 } - match: {test_index.mappings.test_type.t3.full_name: obj.i_t3 }
- length: {test_index.test_type: 3} - length: {test_index.mappings.test_type: 3}
--- ---
"Get field mapping with *t1 for fields": "Get field mapping with *t1 for fields":
- do: - do:
indices.get_field_mapping: indices.get_field_mapping:
index: test_index
field: "*t1" field: "*t1"
- match: {test_index.test_type.t1.full_name: t1 } - match: {test_index.mappings.test_type.t1.full_name: t1 }
- match: {test_index.test_type.obj\.t1.full_name: obj.t1 } - match: {test_index.mappings.test_type.obj\.t1.full_name: obj.t1 }
- match: {test_index.test_type.obj\.i_t1.full_name: obj.i_t1 } - match: {test_index.mappings.test_type.obj\.i_t1.full_name: obj.i_t1 }
- length: {test_index.test_type: 3} - length: {test_index.mappings.test_type: 3}
--- ---
"Get field mapping with wildcarded relative names": "Get field mapping with wildcarded relative names":
- do: - do:
indices.get_field_mapping: indices.get_field_mapping:
index: test_index
field: "i_*" field: "i_*"
- match: {test_index.test_type.i_t1.full_name: obj.i_t1 } - match: {test_index.mappings.test_type.i_t1.full_name: obj.i_t1 }
- match: {test_index.test_type.i_t3.full_name: obj.i_t3 } - match: {test_index.mappings.test_type.i_t3.full_name: obj.i_t3 }
- length: {test_index.test_type: 2} - length: {test_index.mappings.test_type: 2}
---
"Get field mapping should work using '_all' for indices and types":
- do:
indices.get_field_mapping:
index: _all
type: _all
field: "i_*"
- match: {test_index.mappings.test_type.i_t1.full_name: obj.i_t1 }
- match: {test_index.mappings.test_type.i_t3.full_name: obj.i_t3 }
- length: {test_index.mappings.test_type: 2}
- match: {test_index_2.mappings.test_type_2.i_t1.full_name: obj.i_t1 }
- match: {test_index_2.mappings.test_type_2.i_t3.full_name: obj.i_t3 }
- length: {test_index_2.mappings.test_type_2: 2}
---
"Get field mapping should work using '*' for indices and types":
- do:
indices.get_field_mapping:
index: '*'
type: '*'
field: "i_*"
- match: {test_index.mappings.test_type.i_t1.full_name: obj.i_t1 }
- match: {test_index.mappings.test_type.i_t3.full_name: obj.i_t3 }
- length: {test_index.mappings.test_type: 2}
- match: {test_index_2.mappings.test_type_2.i_t1.full_name: obj.i_t1 }
- match: {test_index_2.mappings.test_type_2.i_t3.full_name: obj.i_t3 }
- length: {test_index_2.mappings.test_type_2: 2}
---
"Get field mapping should work using comma_separated values for indices and types":
- do:
indices.get_field_mapping:
index: 'test_index,test_index_2'
type: 'test_type,test_type_2'
field: "i_*"
- match: {test_index.mappings.test_type.i_t1.full_name: obj.i_t1 }
- match: {test_index.mappings.test_type.i_t3.full_name: obj.i_t3 }
- length: {test_index.mappings.test_type: 2}
- match: {test_index_2.mappings.test_type_2.i_t1.full_name: obj.i_t1 }
- match: {test_index_2.mappings.test_type_2.i_t3.full_name: obj.i_t3 }
- length: {test_index_2.mappings.test_type_2: 2}

View File

@ -2,31 +2,172 @@
setup: setup:
- do: - do:
indices.create: indices.create:
index: test_index index: test_1
body: body:
mappings: mappings:
test_type: type_1: {}
properties: type_2: {}
text: - do:
type: string indices.create:
analyzer: whitespace index: test_2
body:
mappings:
type_2: {}
type_3: {}
--- ---
"Get index mapping": "Get /_mapping":
- do:
indices.get_mapping:
index: test_index
- match: {test_index.test_type.properties.text.type: string} - do:
- match: {test_index.test_type.properties.text.analyzer: whitespace} indices.get_mapping: {}
- match: { test_1.mappings.type_1.properties: {}}
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_3.properties: {}}
--- ---
"Get type mapping": "Get /{index}/_mapping":
- do: - do:
indices.get_mapping: indices.get_mapping:
index: test_index index: test_1
type: test_type
- match: {test_index.test_type.properties.text.type: string} - match: { test_1.mappings.type_1.properties: {}}
- match: {test_index.test_type.properties.text.analyzer: whitespace} - match: { test_1.mappings.type_2.properties: {}}
- is_false: test_2
---
"Get /{index}/_mapping/_all":
- do:
indices.get_mapping:
index: test_1
type: _all
- match: { test_1.mappings.type_1.properties: {}}
- match: { test_1.mappings.type_2.properties: {}}
- is_false: test_2
---
"Get /{index}/_mapping/*":
- do:
indices.get_mapping:
index: test_1
type: '*'
- match: { test_1.mappings.type_1.properties: {}}
- match: { test_1.mappings.type_2.properties: {}}
- is_false: test_2
---
"Get /{index}/_mapping/{type}":
- do:
indices.get_mapping:
index: test_1
type: type_1
- match: { test_1.mappings.type_1.properties: {}}
- is_false: test_1.mappings.type_2
- is_false: test_2
---
"Get /{index}/_mapping/{type,type}":
- do:
indices.get_mapping:
index: test_1
type: type_1,type_2
- match: { test_1.mappings.type_1.properties: {}}
- match: { test_1.mappings.type_2.properties: {}}
- is_false: test_2
---
"Get /{index}/_mapping/{type*}":
- do:
indices.get_mapping:
index: test_1
type: '*2'
- match: { test_1.mappings.type_2.properties: {}}
- is_false: test_1.mappings.type_1
- is_false: test_2
---
"Get /_mapping/{type}":
- do:
indices.get_mapping:
type: type_2
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2.properties: {}}
- is_false: test_1.mappings.type_1
- is_false: test_2.mappings.type_3
---
"Get /_all/_mapping/{type}":
- do:
indices.get_mapping:
index: _all
type: type_2
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2.properties: {}}
- is_false: test_1.mappings.type_1
- is_false: test_2.mappings.type_3
---
"Get /*/_mapping/{type}":
- do:
indices.get_mapping:
index: '*'
type: type_2
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2.properties: {}}
- is_false: test_1.mappings.type_1
- is_false: test_2.mappings.type_3
---
"Get /{index}/_mapping/{type}":
- do:
indices.get_mapping:
index: test_2
type: type_2
- match: { test_2.mappings.type_2.properties: {}}
- is_false: test_1
- is_false: test_2.mappings.type_3
---
"Get /index,index/_mapping/{type}":
- do:
indices.get_mapping:
index: test_1,test_2
type: type_2
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2.properties: {}}
- is_false: test_2.mappings.type_3
---
"Get /index*/_mapping/{type}":
- do:
indices.get_mapping:
index: '*2'
type: type_2
- match: { test_2.mappings.type_2.properties: {}}
- is_false: test_1
- is_false: test_2.mappings.type_3

View File

@ -1,5 +1,5 @@
--- ---
"Raise 404 when type doesn't exist": "Return empty response when type doesn't exist":
- do: - do:
indices.create: indices.create:
index: test_index index: test_index
@ -12,8 +12,8 @@
analyzer: whitespace analyzer: whitespace
- do: - do:
catch: missing
indices.get_mapping: indices.get_mapping:
index: test_index index: test_index
type: not_test_type type: not_test_type
- match: { '': {}}

View File

@ -0,0 +1,26 @@
---
"Getting mapping for aliases should return the real index as key":
- do:
indices.create:
index: test_index
body:
mappings:
test_type:
properties:
text:
type: string
analyzer: whitespace
- do:
indices.put_alias:
index: test_index
name: test_alias
- do:
indices.get_mapping:
index: test_alias
- match: {test_index.mappings.test_type.properties.text.type: string}
- match: {test_index.mappings.test_type.properties.text.analyzer: whitespace}

View File

@ -1,32 +1,167 @@
--- ---
"Test get indices settings": setup:
- do: - do:
indices.create: indices.create:
index: test-index index: test_1
body:
settings:
index:
refresh_interval: -1
number_of_shards: 2
number_of_replicas: 3
- do: - do:
indices.get_settings: indices.create:
index: test-index index: test_2
- match: ---
test-index.settings.index.number_of_replicas: "3" "Get /_settings":
- match:
test-index.settings.index.number_of_shards: "2"
- match:
test-index.settings.index.refresh_interval: "-1"
- do: - do:
indices.get_settings: indices.get_settings: {}
- match: { test_1.settings.index.number_of_shards: "5"}
- match: { test_1.settings.index.number_of_replicas: "1"}
- match: { test_2.settings.index.number_of_shards: "5"}
- match: { test_2.settings.index.number_of_replicas: "1"}
---
"Get /{index}/_settings":
- do:
indices.get_settings:
index: test_1
- match: { test_1.settings.index.number_of_shards: "5"}
- match: { test_1.settings.index.number_of_replicas: "1"}
- is_false: test_2
---
"Get /{index}/_settings/_all":
- do:
indices.get_settings:
index: test_1
name: _all
- match: { test_1.settings.index.number_of_shards: "5"}
- match: { test_1.settings.index.number_of_replicas: "1"}
- is_false: test_2
---
"Get /{index}/_settings/*":
- do:
indices.get_settings:
index: test_1
name: '*'
- match: { test_1.settings.index.number_of_shards: "5"}
- match: { test_1.settings.index.number_of_replicas: "1"}
- is_false: test_2
---
"Get /{index}/_settings/{name}":
- do:
indices.get_settings:
index: test_1
name: index.number_of_shards
- match: { test_1.settings.index.number_of_shards: "5"}
- is_false: test_1.settings.index.number_of_replicas
- is_false: test_2
---
"Get /{index}/_settings/{name,name}":
- do:
indices.get_settings:
index: test_1
name: index.number_of_shards,index.number_of_replicas
- match: { test_1.settings.index.number_of_shards: "5"}
- match: { test_1.settings.index.number_of_replicas: "1"}
- is_false: test_2
---
"Get /{index}/_settings/{name*}":
- do:
indices.get_settings:
index: test_1
name: 'index.number_of_s*'
- match: { test_1.settings.index.number_of_shards: "5"}
- is_false: test_1.settings.index.number_of_replicas
- is_false: test_2
---
"Get /_settings/{name}":
- do:
indices.get_settings:
name: index.number_of_shards
- match: { test_1.settings.index.number_of_shards: "5"}
- match: { test_2.settings.index.number_of_shards: "5"}
- is_false: test_1.settings.index.number_of_replicas
- is_false: test_2.settings.index.number_of_replicas
---
"Get /_all/_settings/{name}":
- do:
indices.get_settings:
index: _all index: _all
prefix: index.number name: index.number_of_shards
- match: - match: { test_1.settings.index.number_of_shards: "5"}
test-index.settings.index.number_of_replicas: "3" - match: { test_2.settings.index.number_of_shards: "5"}
- match: - is_false: test_1.settings.index.number_of_replicas
test-index.settings.index.number_of_shards: "2" - is_false: test_2.settings.index.number_of_replicas
---
"Get /*/_settings/{name}":
- do:
indices.get_settings:
index: '*'
name: index.number_of_shards
- match: { test_1.settings.index.number_of_shards: "5"}
- match: { test_2.settings.index.number_of_shards: "5"}
- is_false: test_1.settings.index.number_of_replicas
- is_false: test_2.settings.index.number_of_replicas
---
"Get /{index}/_settings/{name}":
- do:
indices.get_settings:
index: test_1
name: index.number_of_shards
- match: { test_1.settings.index.number_of_shards: "5"}
- is_false: test_1.settings.index.number_of_replicas
- is_false: test_2
---
"Get /index,index/_settings/{name}":
- do:
indices.get_settings:
index: test_1,test_2
name: index.number_of_shards
- match: { test_1.settings.index.number_of_shards: "5"}
- match: { test_2.settings.index.number_of_shards: "5"}
- is_false: test_1.settings.index.number_of_replicas
- is_false: test_2.settings.index.number_of_replicas
---
"Get /index*/_settings/{name}":
- do:
indices.get_settings:
index: '*2'
name: index.number_of_shards
- match: { test_2.settings.index.number_of_shards: "5"}
- is_false: test_1
- is_false: test_2.settings.index.number_of_replicas

View File

@ -0,0 +1,26 @@
---
"Getting settings for aliases should return the real index as key":
- do:
indices.create:
index: test-index
body:
settings:
index:
refresh_interval: -1
number_of_shards: 2
number_of_replicas: 3
- do:
indices.put_alias:
index: test-index
name: test-alias
- do:
indices.get_settings:
index: test-alias
- match: { test-index.settings.index.number_of_replicas: "3" }
- match: { test-index.settings.index.number_of_shards: "2" }
- match: { test-index.settings.index.refresh_interval: "-1" }

View File

@ -0,0 +1,215 @@
---
setup:
- do:
indices.create:
index: test_1
- do:
indices.create:
index: test_2
- do:
cluster.health:
wait_for_status: yellow
- do:
indices.put_warmer:
index: test_1
name: warmer_1
body: { query: { match_all: { }}}
- do:
indices.put_warmer:
index: test_1
name: warmer_2
body: { query: { match_all: { }}}
- do:
indices.put_warmer:
index: test_2
name: warmer_2
body: { query: { match_all: { }}}
- do:
indices.put_warmer:
index: test_2
name: warmer_3
body: { query: { match_all: { }}}
- do:
indices.refresh: {}
---
"Get /_warmer":
- do:
indices.get_warmer: {}
- match: { test_1.warmers.warmer_1.source.query.match_all: {}}
- match: { test_1.warmers.warmer_2.source.query.match_all: {}}
- match: { test_2.warmers.warmer_2.source.query.match_all: {}}
- match: { test_2.warmers.warmer_3.source.query.match_all: {}}
---
"Get /{index}/_warmer":
- do:
indices.get_warmer:
index: test_1
- match: { test_1.warmers.warmer_1.source.query.match_all: {}}
- match: { test_1.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_2
---
"Get /{index}/_warmer/_all":
- do:
indices.get_warmer:
index: test_1
name: _all
- match: { test_1.warmers.warmer_1.source.query.match_all: {}}
- match: { test_1.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_2
---
"Get /{index}/_warmer/*":
- do:
indices.get_warmer:
index: test_1
name: '*'
- match: { test_1.warmers.warmer_1.source.query.match_all: {}}
- match: { test_1.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_2
---
"Get /{index}/_warmer/{name}":
- do:
indices.get_warmer:
index: test_1
name: warmer_1
- match: { test_1.warmers.warmer_1.source.query.match_all: {}}
- is_false: test_1.warmers.warmer_2
- is_false: test_2
---
"Get /{index}/_warmer/{name,name}":
- do:
indices.get_warmer:
index: test_1
name: warmer_1,warmer_2
- match: { test_1.warmers.warmer_1.source.query.match_all: {}}
- match: { test_1.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_2
---
"Get /{index}/_warmer/{name*}":
- do:
indices.get_warmer:
index: test_1
name: '*2'
- match: { test_1.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_1.warmers.warmer_1
- is_false: test_2
---
"Get /_warmer/{name}":
- do:
indices.get_warmer:
name: warmer_2
- match: { test_1.warmers.warmer_2.source.query.match_all: {}}
- match: { test_2.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_1.warmers.warmer_1
- is_false: test_2.warmers.warmer_3
---
"Get /_all/_warmer/{name}":
- do:
indices.get_warmer:
index: _all
name: warmer_2
- match: { test_1.warmers.warmer_2.source.query.match_all: {}}
- match: { test_2.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_1.warmers.warmer_1
- is_false: test_2.warmers.warmer_3
---
"Get /*/_warmer/{name}":
- do:
indices.get_warmer:
index: '*'
name: warmer_2
- match: { test_1.warmers.warmer_2.source.query.match_all: {}}
- match: { test_2.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_1.warmers.warmer_1
- is_false: test_2.warmers.warmer_3
---
"Get /{index}/_warmer/{name}":
- do:
indices.get_warmer:
index: test_2
name: warmer_2
- match: { test_2.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_1
- is_false: test_2.warmers.warmer_3
---
"Get /index,index/_warmer/{name}":
- do:
indices.get_warmer:
index: test_1,test_2
name: warmer_2
- match: { test_1.warmers.warmer_2.source.query.match_all: {}}
- match: { test_2.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_2.warmers.warmer_3
---
"Get /index*/_warmer/{name}":
- do:
indices.get_warmer:
index: '*2'
name: warmer_2
- match: { test_2.warmers.warmer_2.source.query.match_all: {}}
- is_false: test_1
- is_false: test_2.warmers.warmer_3
---
"Empty response when no matching warmer":
- do:
indices.get_warmer:
index: '*'
name: non_existent
- match: { '': {}}
---
"Throw 404 on missing index":
- do:
catch: missing
indices.get_warmer:
index: non_existent
name: '*'

View File

@ -0,0 +1,127 @@
---
setup:
# create three indices
- do:
indices.create:
index: test_index1
- do:
indices.create:
index: test_index2
- do:
indices.create:
index: foo
---
"put alias per index":
- do:
indices.put_alias:
index: test_index1
name: alias
- do:
indices.put_alias:
index: test_index2
name: alias
- do:
indices.get_alias:
name: alias
- match: {test_index1.aliases.alias: {}}
- match: {test_index2.aliases.alias: {}}
- is_false: foo
---
"put alias in _all index":
- do:
indices.put_alias:
index: _all
name: alias
- do:
indices.get_alias:
name: alias
- match: {test_index1.aliases.alias: {}}
- match: {test_index2.aliases.alias: {}}
- match: {foo.aliases.alias: {}}
---
"put alias in * index":
- do:
indices.put_alias:
index: '*'
name: alias
- do:
indices.get_alias:
name: alias
- match: {test_index1.aliases.alias: {}}
- match: {test_index2.aliases.alias: {}}
- match: {foo.aliases.alias: {}}
---
"put alias prefix* index":
- do:
indices.put_alias:
index: "test_*"
name: alias
- do:
indices.get_alias:
name: alias
- match: {test_index1.aliases.alias: {}}
- match: {test_index2.aliases.alias: {}}
- is_false: foo
---
"put alias in list of indices":
- do:
indices.put_alias:
index: "test_index1,test_index2"
name: alias
- do:
indices.get_alias:
name: alias
- match: {test_index1.aliases.alias: {}}
- match: {test_index2.aliases.alias: {}}
- is_false: foo
---
"put alias with blank index":
- do:
indices.put_alias:
name: alias
- do:
indices.get_alias:
name: alias
- match: {test_index1.aliases.alias: {}}
- match: {test_index2.aliases.alias: {}}
- match: {foo.aliases.alias: {}}
---
"put alias with mising name":
- do:
catch: param
indices.put_alias: {}

View File

@ -22,10 +22,10 @@
indices.get_mapping: indices.get_mapping:
index: test_index index: test_index
- match: {test_index.test_type.properties.text1.type: string} - match: {test_index.mappings.test_type.properties.text1.type: string}
- match: {test_index.test_type.properties.text1.analyzer: whitespace} - match: {test_index.mappings.test_type.properties.text1.analyzer: whitespace}
- match: {test_index.test_type.properties.text2.type: string} - match: {test_index.mappings.test_type.properties.text2.type: string}
- match: {test_index.test_type.properties.text2.analyzer: whitespace} - match: {test_index.mappings.test_type.properties.text2.analyzer: whitespace}
- do: - do:
indices.put_mapping: indices.put_mapping:
@ -56,7 +56,7 @@
indices.get_mapping: indices.get_mapping:
index: test_index index: test_index
- match: {test_index.test_type.properties.text1.type: string} - match: {test_index.mappings.test_type.properties.text1.type: string}
- match: {test_index.test_type.properties.text1.fields.text_raw.index: not_analyzed} - match: {test_index.mappings.test_type.properties.text1.fields.text_raw.index: not_analyzed}
- match: {test_index.test_type.properties.text2.type: string} - match: {test_index.mappings.test_type.properties.text2.type: string}
- match: {test_index.test_type.properties.text2.fields.text_raw.index: not_analyzed} - match: {test_index.mappings.test_type.properties.text2.fields.text_raw.index: not_analyzed}

View File

@ -0,0 +1,178 @@
setup:
- do:
indices.create:
index: test_index1
- do:
indices.create:
index: test_index2
- do:
indices.create:
index: foo
---
"put one mapping per index":
- do:
indices.put_mapping:
index: test_index1
type: test_type
body:
test_type:
properties:
text:
type: string
analyzer: whitespace
- do:
indices.put_mapping:
index: test_index2
type: test_type
body:
test_type:
properties:
text:
type: string
analyzer: whitespace
- do:
indices.get_mapping: {}
- match: {test_index1.mappings.test_type.properties.text.type: string}
- match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
- match: {test_index2.mappings.test_type.properties.text.type: string}
- match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
- is_false: foo
---
"put mapping in _all index":
- do:
indices.put_mapping:
index: _all
type: test_type
body:
test_type:
properties:
text:
type: string
analyzer: whitespace
- do:
indices.get_mapping: {}
- match: {test_index1.mappings.test_type.properties.text.type: string}
- match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
- match: {test_index2.mappings.test_type.properties.text.type: string}
- match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
- match: {foo.mappings.test_type.properties.text.type: string}
- match: {foo.mappings.test_type.properties.text.analyzer: whitespace}
---
"put mapping in * index":
- do:
indices.put_mapping:
index: "*"
type: test_type
body:
test_type:
properties:
text:
type: string
analyzer: whitespace
- do:
indices.get_mapping: {}
- match: {test_index1.mappings.test_type.properties.text.type: string}
- match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
- match: {test_index2.mappings.test_type.properties.text.type: string}
- match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
- match: {foo.mappings.test_type.properties.text.type: string}
- match: {foo.mappings.test_type.properties.text.analyzer: whitespace}
---
"put mapping in prefix* index":
- do:
indices.put_mapping:
index: "test_index*"
type: test_type
body:
test_type:
properties:
text:
type: string
analyzer: whitespace
- do:
indices.get_mapping: {}
- match: {test_index1.mappings.test_type.properties.text.type: string}
- match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
- match: {test_index2.mappings.test_type.properties.text.type: string}
- match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
- is_false: foo
---
"put mapping in list of indices":
- do:
indices.put_mapping:
index: [test_index1, test_index2]
type: test_type
body:
test_type:
properties:
text:
type: string
analyzer: whitespace
- do:
indices.get_mapping: {}
- match: {test_index1.mappings.test_type.properties.text.type: string}
- match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
- match: {test_index2.mappings.test_type.properties.text.type: string}
- match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
- is_false: foo
---
"put mapping with blank index":
- do:
indices.put_mapping:
type: test_type
body:
test_type:
properties:
text:
type: string
analyzer: whitespace
- do:
indices.get_mapping: {}
- match: {test_index1.mappings.test_type.properties.text.type: string}
- match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
- match: {test_index2.mappings.test_type.properties.text.type: string}
- match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
- match: {foo.mappings.test_type.properties.text.type: string}
- match: {foo.mappings.test_type.properties.text.analyzer: whitespace}
---
"put mapping with mising type":
- do:
catch: param
indices.put_mapping: {}

View File

@ -0,0 +1,113 @@
setup:
- do:
indices.create:
index: test_index1
- do:
indices.create:
index: test_index2
- do:
indices.create:
index: foo
---
"put settings per index":
- do:
indices.put_settings:
index: test_index1
body:
refresh_interval: 1s
- do:
indices.put_settings:
index: test_index2
body:
refresh_interval: 1s
- do:
indices.get_settings: {}
- match: {test_index1.settings.index.refresh_interval: 1s}
- match: {test_index2.settings.index.refresh_interval: 1s}
- is_false: foo.settings.index.refresh_interval
---
"put settings in _all index":
- do:
indices.put_settings:
index: _all
body:
refresh_interval: 1s
- do:
indices.get_settings: {}
- match: {test_index1.settings.index.refresh_interval: 1s}
- match: {test_index2.settings.index.refresh_interval: 1s}
- match: {foo.settings.index.refresh_interval: 1s}
---
"put settings in * index":
- do:
indices.put_settings:
index: '*'
body:
refresh_interval: 1s
- do:
indices.get_settings: {}
- match: {test_index1.settings.index.refresh_interval: 1s}
- match: {test_index2.settings.index.refresh_interval: 1s}
- match: {foo.settings.index.refresh_interval: 1s}
---
"put settings in prefix* index":
- do:
indices.put_settings:
index: 'test*'
body:
refresh_interval: 1s
- do:
indices.get_settings: {}
- match: {test_index1.settings.index.refresh_interval: 1s}
- match: {test_index2.settings.index.refresh_interval: 1s}
- is_false: foo.settings.index.refresh_interval
---
"put settings in list of indices":
- skip:
version: 1 - 999
reason: list of indices not implemented yet
- do:
indices.put_settings:
index: test_index1, test_index2
body:
refresh_interval: 1s
- do:
indices.get_settings: {}
- match: {test_index1.settings.index.refresh_interval: 1s}
- match: {test_index2.settings.index.refresh_interval: 1s}
- is_false: foo.settings.index.refresh_interval
---
"put settings in blank index":
- do:
indices.put_settings:
body:
refresh_interval: 1s
- do:
indices.get_settings: {}
- match: {test_index1.settings.index.refresh_interval: 1s}
- match: {test_index2.settings.index.refresh_interval: 1s}
- match: {foo.settings.index.refresh_interval: 1s}

View File

@ -1,18 +1,24 @@
--- ---
"Basic test for warmers": setup:
- do: - do:
indices.create: indices.create:
index: test_index index: test_index
- do:
indices.create:
index: test_idx
- do: - do:
cluster.health: cluster.health:
wait_for_status: yellow wait_for_status: yellow
- do: - do:
catch: missing indices.put_warmer:
indices.get_warmer: index: test_idx
index: test_index name: test_warmer2
name: test_warmer body:
query:
match_all: {}
- do: - do:
indices.put_warmer: indices.put_warmer:
@ -22,6 +28,8 @@
query: query:
match_all: {} match_all: {}
---
"Basic test for warmers":
- do: - do:
indices.get_warmer: indices.get_warmer:
index: test_index index: test_index
@ -32,9 +40,106 @@
- do: - do:
indices.delete_warmer: indices.delete_warmer:
index: test_index index: test_index
name: test_warmer
- do:
indices.get_warmer:
index: test_index
name: test_warmer
- match: { '': {}}
---
"Getting all warmers via /_warmer should work":
- do:
indices.get_warmer: {}
- match: {test_index.warmers.test_warmer.source.query.match_all: {}}
- match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
---
"Getting warmers for several indices should work using *":
- do:
indices.get_warmer:
index: '*'
name: '*'
- match: {test_index.warmers.test_warmer.source.query.match_all: {}}
- match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
---
"Getting warmers for several indices should work using _all":
- do:
indices.get_warmer:
index: _all
name: _all
- match: {test_index.warmers.test_warmer.source.query.match_all: {}}
- match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
---
"Getting all warmers without specifying index should work":
- do:
indices.get_warmer:
name: _all
- match: {test_index.warmers.test_warmer.source.query.match_all: {}}
- match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
---
"Getting warmers for several indices should work using prefix*":
- do:
indices.get_warmer:
index: test_i*
name: test_w*
- match: {test_index.warmers.test_warmer.source.query.match_all: {}}
- match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
---
"Getting warmers for several indices should work using comma-separated lists":
- do:
indices.get_warmer:
index: test_index,test_idx
name: test_warmer,test_warmer2
- match: {test_index.warmers.test_warmer.source.query.match_all: {}}
- match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
---
"Getting a non-existent warmer on an existing index should return an empty body":
- do:
indices.get_warmer:
index: test_index
name: non-existent
- match: { '': {}}
---
"Getting an existent and non-existent warmer should return the existent and no data about the non-existent warmer":
- do:
indices.get_warmer:
index: test_index
name: test_warmer,non-existent
- match: {test_index.warmers.test_warmer.source.query.match_all: {}}
- is_false: test_index.warmers.non-existent
---
"Getting warmer on an non-existent index should return 404":
- do: - do:
catch: missing catch: missing
indices.get_warmer: indices.get_warmer:
index: test_index index: non-existent
name: test_warmer name: foo

View File

@ -0,0 +1,30 @@
# REST API spec test: fetching a warmer through an alias must report the
# warmer under the concrete index name, not the alias name.
# NOTE(review): leading indentation was stripped in this copy; restored
# here per the standard elasticsearch rest-api-spec YAML test layout.
---
"Getting warmer for aliases should return the real index as key":

  - do:
      indices.create:
        index: test_index

  # Wait until the new index has at least yellow health before using it.
  - do:
      cluster.health:
        wait_for_status: yellow

  - do:
      indices.put_warmer:
        index: test_index
        name: test_warmer
        body:
          query:
            match_all: {}

  - do:
      indices.put_alias:
        index: test_index
        name: test_alias

  - do:
      indices.get_warmer:
        index: test_alias

  # Response is keyed by the concrete index (test_index), not the alias.
  - match: {test_index.warmers.test_warmer.source.query.match_all: {}}

View File

@ -0,0 +1,128 @@
# REST API spec tests for the indices.put_warmer endpoint with various
# index expressions (explicit, _all, '*', prefix*, list, blank) plus a
# missing-name error case. Three indices are created in setup; `foo` is
# used to verify which index expressions do / do not match it.
# NOTE(review): leading indentation was stripped in this copy; restored
# here per the standard elasticsearch rest-api-spec YAML test layout.
---
setup:

  - do:
      indices.create:
        index: test_index1
  - do:
      indices.create:
        index: test_index2
  - do:
      indices.create:
        index: foo

---
"put warmer per index":

  - do:
      indices.put_warmer:
        index: test_index1
        name: warmer
        body:
          query:
            match_all: {}
  - do:
      indices.put_warmer:
        index: test_index2
        name: warmer
        body:
          query:
            match_all: {}

  - do:
      indices.get_warmer: { index: _all, name: '*' }

  - match: {test_index1.warmers.warmer.source.query.match_all: {}}
  - match: {test_index2.warmers.warmer.source.query.match_all: {}}
  # `foo` was not targeted, so it must not appear in the response.
  - is_false: foo

---
"put warmer in _all index":

  - do:
      indices.put_warmer:
        index: _all
        name: warmer
        body:
          query:
            match_all: {}

  - do:
      indices.get_warmer: { index: _all, name: '*' }

  - match: {test_index1.warmers.warmer.source.query.match_all: {}}
  - match: {test_index2.warmers.warmer.source.query.match_all: {}}
  - match: {foo.warmers.warmer.source.query.match_all: {}}

---
"put warmer in * index":

  - do:
      indices.put_warmer:
        index: "*"
        name: warmer
        body:
          query:
            match_all: {}

  - do:
      indices.get_warmer: { index: _all, name: '*' }

  - match: {test_index1.warmers.warmer.source.query.match_all: {}}
  - match: {test_index2.warmers.warmer.source.query.match_all: {}}
  - match: {foo.warmers.warmer.source.query.match_all: {}}

---
"put warmer prefix* index":

  - do:
      indices.put_warmer:
        index: "test_index*"
        name: warmer
        body:
          query:
            match_all: {}

  - do:
      indices.get_warmer: { index: _all, name: '*' }

  - match: {test_index1.warmers.warmer.source.query.match_all: {}}
  - match: {test_index2.warmers.warmer.source.query.match_all: {}}
  # `foo` does not match the prefix expression.
  - is_false: foo

---
"put warmer in list of indices":

  - do:
      indices.put_warmer:
        index: [test_index1, test_index2]
        name: warmer
        body:
          query:
            match_all: {}

  - do:
      indices.get_warmer: { index: _all, name: '*' }

  - match: {test_index1.warmers.warmer.source.query.match_all: {}}
  - match: {test_index2.warmers.warmer.source.query.match_all: {}}
  - is_false: foo

---
"put warmer with blank index":

  # Omitting `index` applies the warmer to all indices.
  - do:
      indices.put_warmer:
        name: warmer
        body:
          query:
            match_all: {}

  - do:
      indices.get_warmer: { index: _all, name: '*' }

  - match: {test_index1.warmers.warmer.source.query.match_all: {}}
  - match: {test_index2.warmers.warmer.source.query.match_all: {}}
  - match: {foo.warmers.warmer.source.query.match_all: {}}

---
"put warmer with missing name":

  # `name` is a required parameter; the client must reject the request.
  - do:
      catch: param
      indices.put_warmer: {}

View File

@ -148,7 +148,7 @@ public class Version implements Serializable {
public static final int V_1_0_0_Beta2_ID = /*00*/1000002; public static final int V_1_0_0_Beta2_ID = /*00*/1000002;
public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, false, org.apache.lucene.util.Version.LUCENE_46); public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, false, org.apache.lucene.util.Version.LUCENE_46);
public static final int V_1_0_0_RC1_ID = /*00*/1000051; public static final int V_1_0_0_RC1_ID = /*00*/1000051;
public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, true, org.apache.lucene.util.Version.LUCENE_46); public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_46);
public static final Version CURRENT = V_1_0_0_RC1; public static final Version CURRENT = V_1_0_0_RC1;

View File

@ -34,9 +34,9 @@ import java.util.concurrent.TimeUnit;
public interface ActionFuture<T> extends Future<T> { public interface ActionFuture<T> extends Future<T> {
/** /**
* Similar to {@link #get()}, just wrapping the {@link InterruptedException} with * Similar to {@link #get()}, just catching the {@link InterruptedException} with
* {@link org.elasticsearch.ElasticsearchInterruptedException}, and throwing the actual * restoring the interrupted state on the thread and throwing an {@link org.elasticsearch.ElasticsearchIllegalStateException},
* cause of the {@link java.util.concurrent.ExecutionException}. * and throwing the actual cause of the {@link java.util.concurrent.ExecutionException}.
* <p/> * <p/>
* <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped * <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
* from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is
@ -45,9 +45,9 @@ public interface ActionFuture<T> extends Future<T> {
T actionGet() throws ElasticsearchException; T actionGet() throws ElasticsearchException;
/** /**
* Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just wrapping the {@link InterruptedException} with * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} with
* {@link org.elasticsearch.ElasticsearchInterruptedException}, and throwing the actual * restoring the interrupted state on the thread and throwing an {@link org.elasticsearch.ElasticsearchIllegalStateException},
* cause of the {@link java.util.concurrent.ExecutionException}. * and throwing the actual cause of the {@link java.util.concurrent.ExecutionException}.
* <p/> * <p/>
* <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped * <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
* from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is
@ -56,9 +56,9 @@ public interface ActionFuture<T> extends Future<T> {
T actionGet(String timeout) throws ElasticsearchException; T actionGet(String timeout) throws ElasticsearchException;
/** /**
* Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just wrapping the {@link InterruptedException} with * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} with
* {@link org.elasticsearch.ElasticsearchInterruptedException}, and throwing the actual * restoring the interrupted state on the thread and throwing an {@link org.elasticsearch.ElasticsearchIllegalStateException},
* cause of the {@link java.util.concurrent.ExecutionException}. * and throwing the actual cause of the {@link java.util.concurrent.ExecutionException}.
* <p/> * <p/>
* <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped * <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
* from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is
@ -69,9 +69,9 @@ public interface ActionFuture<T> extends Future<T> {
T actionGet(long timeoutMillis) throws ElasticsearchException; T actionGet(long timeoutMillis) throws ElasticsearchException;
/** /**
* Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just wrapping the {@link InterruptedException} with * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} with
* {@link org.elasticsearch.ElasticsearchInterruptedException}, and throwing the actual * restoring the interrupted state on the thread and throwing an {@link org.elasticsearch.ElasticsearchIllegalStateException},
* cause of the {@link java.util.concurrent.ExecutionException}. * and throwing the actual cause of the {@link java.util.concurrent.ExecutionException}.
* <p/> * <p/>
* <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped * <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
* from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is
@ -80,9 +80,9 @@ public interface ActionFuture<T> extends Future<T> {
T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException; T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException;
/** /**
* Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just wrapping the {@link InterruptedException} with * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} with
* {@link org.elasticsearch.ElasticsearchInterruptedException}, and throwing the actual * restoring the interrupted state on the thread and throwing an {@link org.elasticsearch.ElasticsearchIllegalStateException},
* cause of the {@link java.util.concurrent.ExecutionException}. * and throwing the actual cause of the {@link java.util.concurrent.ExecutionException}.
* <p/> * <p/>
* <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped * <p>Note, the actual cause is unwrapped to the actual failure (for example, unwrapped
* from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is

View File

@ -45,6 +45,7 @@ import static org.elasticsearch.common.Strings.hasLength;
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream; import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream; import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
/** /**
* Create snapshot request * Create snapshot request
@ -70,6 +71,8 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
private IndicesOptions indicesOptions = IndicesOptions.strict(); private IndicesOptions indicesOptions = IndicesOptions.strict();
private boolean partial = false;
private Settings settings = EMPTY_SETTINGS; private Settings settings = EMPTY_SETTINGS;
private boolean includeGlobalState = true; private boolean includeGlobalState = true;
@ -187,7 +190,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
} }
/** /**
* Retuns a list of indices that should be included into the snapshot * Returns a list of indices that should be included into the snapshot
* *
* @return list of indices * @return list of indices
*/ */
@ -215,6 +218,27 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
return this; return this;
} }
/**
* Returns true if indices with unavailable shards should be partially snapshotted.
*
* @return the desired behaviour regarding indices options
*/
public boolean partial() {
return partial;
}
/**
* Set to true to allow indices with unavailable shards to be partially snapshotted.
*
* @param partial true if indices with unavailable shards should be partially snapshotted.
* @return this request
*/
public CreateSnapshotRequest partial(boolean partial) {
this.partial = partial;
return this;
}
/** /**
* If set to true the request should wait for the snapshot completion before returning. * If set to true the request should wait for the snapshot completion before returning.
* *
@ -315,6 +339,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
/** /**
* Returns true if global state should be stored as part of the snapshot * Returns true if global state should be stored as part of the snapshot
*
* @return true if global state should be stored as part of the snapshot * @return true if global state should be stored as part of the snapshot
*/ */
public boolean includeGlobalState() { public boolean includeGlobalState() {
@ -353,17 +378,15 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings"); throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings");
} }
} else if (name.equals("ignore_unavailable") || name.equals("ignoreUnavailable")) { } else if (name.equals("ignore_unavailable") || name.equals("ignoreUnavailable")) {
assert entry.getValue() instanceof String; ignoreUnavailable = nodeBooleanValue(entry.getValue());
ignoreUnavailable = Boolean.valueOf(entry.getValue().toString());
} else if (name.equals("allow_no_indices") || name.equals("allowNoIndices")) { } else if (name.equals("allow_no_indices") || name.equals("allowNoIndices")) {
assert entry.getValue() instanceof String; allowNoIndices = nodeBooleanValue(entry.getValue());
allowNoIndices = Boolean.valueOf(entry.getValue().toString());
} else if (name.equals("expand_wildcards_open") || name.equals("expandWildcardsOpen")) { } else if (name.equals("expand_wildcards_open") || name.equals("expandWildcardsOpen")) {
assert entry.getValue() instanceof String; expandWildcardsOpen = nodeBooleanValue(entry.getValue());
expandWildcardsOpen = Boolean.valueOf(entry.getValue().toString());
} else if (name.equals("expand_wildcards_closed") || name.equals("expandWildcardsClosed")) { } else if (name.equals("expand_wildcards_closed") || name.equals("expandWildcardsClosed")) {
assert entry.getValue() instanceof String; expandWildcardsClosed = nodeBooleanValue(entry.getValue());
expandWildcardsClosed = Boolean.valueOf(entry.getValue().toString()); } else if (name.equals("partial")) {
partial(nodeBooleanValue(entry.getValue()));
} else if (name.equals("settings")) { } else if (name.equals("settings")) {
if (!(entry.getValue() instanceof Map)) { if (!(entry.getValue() instanceof Map)) {
throw new ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object"); throw new ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object");
@ -450,6 +473,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
settings = readSettingsFromStream(in); settings = readSettingsFromStream(in);
includeGlobalState = in.readBoolean(); includeGlobalState = in.readBoolean();
waitForCompletion = in.readBoolean(); waitForCompletion = in.readBoolean();
partial = in.readBoolean();
} }
@Override @Override
@ -462,5 +486,6 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
writeSettingsToStream(settings, out); writeSettingsToStream(settings, out);
out.writeBoolean(includeGlobalState); out.writeBoolean(includeGlobalState);
out.writeBoolean(waitForCompletion); out.writeBoolean(waitForCompletion);
out.writeBoolean(partial);
} }
} }

View File

@ -112,6 +112,17 @@ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuil
return this; return this;
} }
/**
* If set to true the request should snapshot indices with unavailable shards
*
* @param partial true if request should snapshot indices with unavailable shards
* @return this builder
*/
public CreateSnapshotRequestBuilder setPartial(boolean partial) {
request.partial(partial);
return this;
}
/** /**
* Sets repository-specific snapshot settings. * Sets repository-specific snapshot settings.
* <p/> * <p/>

View File

@ -78,6 +78,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeOperationA
new SnapshotsService.SnapshotRequest("create_snapshot[" + request.snapshot() + "]", request.snapshot(), request.repository()) new SnapshotsService.SnapshotRequest("create_snapshot[" + request.snapshot() + "]", request.snapshot(), request.repository())
.indices(request.indices()) .indices(request.indices())
.indicesOptions(request.indicesOptions()) .indicesOptions(request.indicesOptions())
.partial(request.partial())
.settings(request.settings()) .settings(request.settings())
.includeGlobalState(request.includeGlobalState()) .includeGlobalState(request.includeGlobalState())
.masterNodeTimeout(request.masterNodeTimeout()); .masterNodeTimeout(request.masterNodeTimeout());

View File

@ -44,6 +44,7 @@ import static org.elasticsearch.common.Strings.hasLength;
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream; import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream; import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
/** /**
* Restore snapshot request * Restore snapshot request
@ -397,17 +398,13 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings"); throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings");
} }
} else if (name.equals("ignore_unavailable") || name.equals("ignoreUnavailable")) { } else if (name.equals("ignore_unavailable") || name.equals("ignoreUnavailable")) {
assert entry.getValue() instanceof String; ignoreUnavailable = nodeBooleanValue(entry.getValue());
ignoreUnavailable = Boolean.valueOf(entry.getValue().toString());
} else if (name.equals("allow_no_indices") || name.equals("allowNoIndices")) { } else if (name.equals("allow_no_indices") || name.equals("allowNoIndices")) {
assert entry.getValue() instanceof String; allowNoIndices = nodeBooleanValue(entry.getValue());
allowNoIndices = Boolean.valueOf(entry.getValue().toString());
} else if (name.equals("expand_wildcards_open") || name.equals("expandWildcardsOpen")) { } else if (name.equals("expand_wildcards_open") || name.equals("expandWildcardsOpen")) {
assert entry.getValue() instanceof String; expandWildcardsOpen = nodeBooleanValue(entry.getValue());
expandWildcardsOpen = Boolean.valueOf(entry.getValue().toString());
} else if (name.equals("expand_wildcards_closed") || name.equals("expandWildcardsClosed")) { } else if (name.equals("expand_wildcards_closed") || name.equals("expandWildcardsClosed")) {
assert entry.getValue() instanceof String; expandWildcardsClosed = nodeBooleanValue(entry.getValue());
expandWildcardsClosed = Boolean.valueOf(entry.getValue().toString());
} else if (name.equals("settings")) { } else if (name.equals("settings")) {
if (!(entry.getValue() instanceof Map)) { if (!(entry.getValue() instanceof Map)) {
throw new ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object"); throw new ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object");

View File

@ -19,21 +19,25 @@
package org.elasticsearch.action.admin.indices.alias; package org.elasticsearch.action.admin.indices.alias;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.cluster.metadata.AliasAction.Type;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.index.query.FilterBuilder;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Locale; import java.util.Locale;
import java.util.Map; import java.util.Map;
@ -46,115 +50,260 @@ import static org.elasticsearch.cluster.metadata.AliasAction.readAliasAction;
*/ */
public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> { public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> {
private List<AliasAction> aliasActions = Lists.newArrayList(); private List<AliasActions> allAliasActions = Lists.newArrayList();
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
public IndicesAliasesRequest() { public IndicesAliasesRequest() {
} }
/*
* Aliases can be added by passing multiple indices to the Request and
* deleted by passing multiple indices and aliases. They are expanded into
* distinct AliasAction instances when the request is processed. This class
* holds the AliasAction and in addition the arrays or alias names and
* indices that is later used to create the final AliasAction instances.
*/
public static class AliasActions {
private String[] indices = Strings.EMPTY_ARRAY;
private String[] aliases = Strings.EMPTY_ARRAY;
private AliasAction aliasAction;
public AliasActions(AliasAction.Type type, String[] indices, String[] aliases) {
aliasAction = new AliasAction(type);
indices(indices);
aliases(aliases);
}
public AliasActions(AliasAction.Type type, String index, String alias) {
aliasAction = new AliasAction(type);
indices(index);
aliases(alias);
}
AliasActions(AliasAction.Type type, String[] index, String alias) {
aliasAction = new AliasAction(type);
indices(index);
aliases(alias);
}
public AliasActions(AliasAction action) {
this.aliasAction = action;
indices(action.index());
aliases(action.alias());
}
public AliasActions(Type type, String index, String[] aliases) {
aliasAction = new AliasAction(type);
indices(index);
aliases(aliases);
}
public AliasActions() {
}
public AliasActions filter(Map<String, Object> filter) {
aliasAction.filter(filter);
return this;
}
public AliasActions filter(FilterBuilder filter) {
aliasAction.filter(filter);
return this;
}
public Type actionType() {
return aliasAction.actionType();
}
public void routing(String routing) {
aliasAction.routing(routing);
}
public void searchRouting(String searchRouting) {
aliasAction.searchRouting(searchRouting);
}
public void indexRouting(String indexRouting) {
aliasAction.indexRouting(indexRouting);
}
public AliasActions filter(String filter) {
aliasAction.filter(filter);
return this;
}
public void indices(String... indices) {
List<String> finalIndices = new ArrayList<String>();
for (String index : indices) {
if (index != null) {
finalIndices.add(index);
}
}
this.indices = finalIndices.toArray(new String[finalIndices.size()]);
}
public void aliases(String... aliases) {
this.aliases = aliases;
}
public String[] aliases() {
return aliases;
}
public String[] indices() {
return indices;
}
public AliasAction aliasAction() {
return aliasAction;
}
public String[] concreteAliases(MetaData metaData, String concreteIndex) {
if (aliasAction.actionType() == Type.REMOVE) {
//for DELETE we expand the aliases
String[] indexAsArray = {concreteIndex};
ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliasMetaData = metaData.findAliases(aliases, indexAsArray);
List<String> finalAliases = new ArrayList<String> ();
for (ObjectCursor<ImmutableList<AliasMetaData>> curAliases : aliasMetaData.values()) {
for (AliasMetaData aliasMeta: curAliases.value) {
finalAliases.add(aliasMeta.alias());
}
}
return finalAliases.toArray(new String[finalAliases.size()]);
} else {
//for add we just return the current aliases
return aliases;
}
}
public AliasActions readFrom(StreamInput in) throws IOException {
indices = in.readStringArray();
aliases = in.readStringArray();
aliasAction = readAliasAction(in);
return this;
}
public void writeTo(StreamOutput out) throws IOException {
out.writeStringArray(indices);
out.writeStringArray(aliases);
this.aliasAction.writeTo(out);
}
}
/** /**
* Adds an alias to the index. * Adds an alias to the index.
*
* @param index The index
* @param alias The alias * @param alias The alias
* @param indices The indices
*/ */
public IndicesAliasesRequest addAlias(String index, String alias) { public IndicesAliasesRequest addAlias(String alias, String... indices) {
aliasActions.add(new AliasAction(AliasAction.Type.ADD, index, alias)); addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias));
return this;
}
public void addAliasAction(AliasActions aliasAction) {
allAliasActions.add(aliasAction);
}
public IndicesAliasesRequest addAliasAction(AliasAction action) {
addAliasAction(new AliasActions(action));
return this;
}
/**
* Adds an alias to the index.
* @param alias The alias
* @param filter The filter
* @param indices The indices
*/
public IndicesAliasesRequest addAlias(String alias, Map<String, Object> filter, String... indices) {
addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filter));
return this; return this;
} }
/** /**
* Adds an alias to the index. * Adds an alias to the index.
*
* @param index The index
* @param alias The alias
* @param filter The filter
*/
public IndicesAliasesRequest addAlias(String index, String alias, String filter) {
aliasActions.add(new AliasAction(AliasAction.Type.ADD, index, alias, filter));
return this;
}
/**
* Adds an alias to the index.
*
* @param index The index
* @param alias The alias
* @param filter The filter
*/
public IndicesAliasesRequest addAlias(String index, String alias, Map<String, Object> filter) {
if (filter == null || filter.isEmpty()) {
aliasActions.add(new AliasAction(AliasAction.Type.ADD, index, alias));
return this;
}
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.map(filter);
aliasActions.add(new AliasAction(AliasAction.Type.ADD, index, alias, builder.string()));
return this;
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + filter + "]", e);
}
}
/**
* Adds an alias to the index.
*
* @param index The index
* @param alias The alias * @param alias The alias
* @param filterBuilder The filter * @param filterBuilder The filter
* @param indices The indices
*/ */
public IndicesAliasesRequest addAlias(String index, String alias, FilterBuilder filterBuilder) { public IndicesAliasesRequest addAlias(String alias, FilterBuilder filterBuilder, String... indices) {
if (filterBuilder == null) { addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filterBuilder));
aliasActions.add(new AliasAction(AliasAction.Type.ADD, index, alias)); return this;
return this;
}
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.close();
return addAlias(index, alias, builder.string());
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to build json for alias request", e);
}
} }
/**
* Removes an alias to the index.
*
* @param indices The indices
* @param aliases The aliases
*/
public IndicesAliasesRequest removeAlias(String[] indices, String... aliases) {
addAliasAction(new AliasActions(AliasAction.Type.REMOVE, indices, aliases));
return this;
}
/** /**
* Removes an alias to the index. * Removes an alias to the index.
* *
* @param index The index * @param index The index
* @param alias The alias * @param aliases The aliases
*/ */
public IndicesAliasesRequest removeAlias(String index, String alias) { public IndicesAliasesRequest removeAlias(String index, String... aliases) {
aliasActions.add(new AliasAction(AliasAction.Type.REMOVE, index, alias)); addAliasAction(new AliasActions(AliasAction.Type.REMOVE, index, aliases));
return this; return this;
} }
public IndicesAliasesRequest addAliasAction(AliasAction action) { List<AliasActions> aliasActions() {
aliasActions.add(action); return this.allAliasActions;
return this;
} }
List<AliasAction> aliasActions() { public List<AliasActions> getAliasActions() {
return this.aliasActions;
}
public List<AliasAction> getAliasActions() {
return aliasActions(); return aliasActions();
} }
@Override @Override
public ActionRequestValidationException validate() { public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null; ActionRequestValidationException validationException = null;
if (aliasActions.isEmpty()) { if (allAliasActions.isEmpty()) {
return addValidationError("Must specify at least one alias action", validationException); return addValidationError("Must specify at least one alias action", validationException);
} }
for (AliasAction aliasAction : aliasActions) { for (AliasActions aliasAction : allAliasActions) {
if (!Strings.hasText(aliasAction.alias())) { if (aliasAction.actionType() == AliasAction.Type.ADD) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH) + "] requires an [alias] to be set", validationException); if (aliasAction.aliases.length != 1) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "] requires exactly one [alias] to be set", validationException);
}
if (!Strings.hasText(aliasAction.aliases[0])) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "] requires an [alias] to be set", validationException);
}
} else {
if (aliasAction.aliases.length == 0) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: aliases may not be empty", validationException);
}
for (String alias : aliasAction.aliases) {
if (!Strings.hasText(alias)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: [alias] may not be empty string", validationException);
}
}
if (CollectionUtils.isEmpty(aliasAction.indices)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: indices may not be empty", validationException);
}
} }
if (!Strings.hasText(aliasAction.index())) { if (!CollectionUtils.isEmpty(aliasAction.indices)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH) + "] requires an [index] to be set", validationException); for (String index : aliasAction.indices) {
if (!Strings.hasText(index)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: [index] may not be empty string", validationException);
}
}
} }
} }
return validationException; return validationException;
@ -165,7 +314,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
super.readFrom(in); super.readFrom(in);
int size = in.readVInt(); int size = in.readVInt();
for (int i = 0; i < size; i++) { for (int i = 0; i < size; i++) {
aliasActions.add(readAliasAction(in)); allAliasActions.add(readAliasActions(in));
} }
readTimeout(in); readTimeout(in);
} }
@ -173,10 +322,20 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out); super.writeTo(out);
out.writeVInt(aliasActions.size()); out.writeVInt(allAliasActions.size());
for (AliasAction aliasAction : aliasActions) { for (AliasActions aliasAction : allAliasActions) {
aliasAction.writeTo(out); aliasAction.writeTo(out);
} }
writeTimeout(out); writeTimeout(out);
} }
public IndicesOptions indicesOptions() {
return indicesOptions;
}
private AliasActions readAliasActions(StreamInput in) throws IOException {
AliasActions actions = new AliasActions();
return actions.readFrom(in);
}
} }

View File

@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.alias; package org.elasticsearch.action.admin.indices.alias;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.client.internal.InternalIndicesAdminClient; import org.elasticsearch.client.internal.InternalIndicesAdminClient;
@ -36,15 +37,26 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
public IndicesAliasesRequestBuilder(IndicesAdminClient indicesClient) { public IndicesAliasesRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new IndicesAliasesRequest()); super((InternalIndicesAdminClient) indicesClient, new IndicesAliasesRequest());
} }
/** /**
* Adds an alias to the index. * Adds an alias to the index.
* *
* @param index The index * @param index The index
* @param alias The alias * @param alias The alias
*/ */
public IndicesAliasesRequestBuilder addAlias(String index, String alias) { public IndicesAliasesRequestBuilder addAlias(String index, String alias) {
request.addAlias(index, alias); request.addAlias(alias, index);
return this;
}
/**
* Adds an alias to the index.
*
* @param index The indices
* @param alias The alias
*/
public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias) {
request.addAlias(alias, indices);
return this; return this;
} }
@ -56,46 +68,74 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter * @param filter The filter
*/ */
public IndicesAliasesRequestBuilder addAlias(String index, String alias, String filter) { public IndicesAliasesRequestBuilder addAlias(String index, String alias, String filter) {
request.addAlias(index, alias, filter); AliasActions action = new AliasActions(AliasAction.Type.ADD, index, alias).filter(filter);
request.addAliasAction(action);
return this; return this;
} }
/** /**
* Adds an alias to the index. * Adds an alias to the index.
* *
* @param index The index * @param indices The indices
* @param alias The alias
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String indices[], String alias, String filter) {
AliasActions action = new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filter);
request.addAliasAction(action);
return this;
}
/**
* Adds an alias to the index.
*
* @param indices The indices
* @param alias The alias
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias, Map<String, Object> filter) {
request.addAlias(alias, filter, indices);
return this;
}
/**
* Adds an alias to the index.
*
* @param index The indices
* @param alias The alias * @param alias The alias
* @param filter The filter * @param filter The filter
*/ */
public IndicesAliasesRequestBuilder addAlias(String index, String alias, Map<String, Object> filter) { public IndicesAliasesRequestBuilder addAlias(String index, String alias, Map<String, Object> filter) {
request.addAlias(index, alias, filter); request.addAlias(alias, filter, index);
return this; return this;
} }
/** /**
* Adds an alias to the index. * Adds an alias to the index.
* *
* @param index The index * @param indices The indices
* @param alias The alias
* @param filterBuilder The filter
*/
public IndicesAliasesRequestBuilder addAlias(String indices[], String alias, FilterBuilder filterBuilder) {
request.addAlias(alias, filterBuilder, indices);
return this;
}
/**
* Adds an alias to the index.
*
* @param index The index
* @param alias The alias * @param alias The alias
* @param filterBuilder The filter * @param filterBuilder The filter
*/ */
public IndicesAliasesRequestBuilder addAlias(String index, String alias, FilterBuilder filterBuilder) { public IndicesAliasesRequestBuilder addAlias(String index, String alias, FilterBuilder filterBuilder) {
request.addAlias(index, alias, filterBuilder); request.addAlias(alias, filterBuilder, index);
return this; return this;
} }
/** /**
* Adds an alias action to the request. * Removes an alias from the index.
*
* @param aliasAction The alias Action
*/
public IndicesAliasesRequestBuilder addAliasAction(AliasAction aliasAction) {
request.addAliasAction(aliasAction);
return this;
}
/**
* Removes an alias to the index.
* *
* @param index The index * @param index The index
* @param alias The alias * @param alias The alias
@ -104,9 +144,54 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
request.removeAlias(index, alias); request.removeAlias(index, alias);
return this; return this;
} }
/**
* Removes aliases from the index.
*
* @param indices The indices
* @param aliases The aliases
*/
public IndicesAliasesRequestBuilder removeAlias(String[] indices, String... aliases) {
request.removeAlias(indices, aliases);
return this;
}
/**
* Removes aliases from the index.
*
* @param index The index
* @param aliases The aliases
*/
public IndicesAliasesRequestBuilder removeAlias(String index, String[] aliases) {
request.removeAlias(index, aliases);
return this;
}
@Override @Override
protected void doExecute(ActionListener<IndicesAliasesResponse> listener) { protected void doExecute(ActionListener<IndicesAliasesResponse> listener) {
((IndicesAdminClient) client).aliases(request, listener); ((IndicesAdminClient) client).aliases(request, listener);
} }
/**
* Adds an alias action to the request.
*
* @param aliasAction The alias action
*/
public IndicesAliasesRequestBuilder addAliasAction(AliasAction aliasAction) {
request.addAliasAction(aliasAction);
return this;
}
/**
* Adds an alias action to the request.
*
* @param aliasAction The alias action
*/
public IndicesAliasesRequestBuilder addAliasAction(
AliasActions action) {
request.addAliasAction(action);
return this;
}
} }

View File

@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.indices.alias;
import com.google.common.collect.Sets; import com.google.common.collect.Sets;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
@ -33,9 +34,13 @@ import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesMissingException;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set; import java.util.Set;
/** /**
@ -76,8 +81,10 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeOperationA
@Override @Override
protected ClusterBlockException checkBlock(IndicesAliasesRequest request, ClusterState state) { protected ClusterBlockException checkBlock(IndicesAliasesRequest request, ClusterState state) {
Set<String> indices = Sets.newHashSet(); Set<String> indices = Sets.newHashSet();
for (AliasAction aliasAction : request.aliasActions()) { for (AliasActions aliasAction : request.aliasActions()) {
indices.add(aliasAction.index()); for (String index : aliasAction.indices()) {
indices.add(index);
}
} }
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, indices.toArray(new String[indices.size()])); return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, indices.toArray(new String[indices.size()]));
} }
@ -85,9 +92,36 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeOperationA
@Override @Override
protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener<IndicesAliasesResponse> listener) throws ElasticsearchException { protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener<IndicesAliasesResponse> listener) throws ElasticsearchException {
//Expand the indices names
List<AliasActions> actions = request.aliasActions();
List<AliasAction> finalActions = new ArrayList<AliasAction>();
boolean hasOnlyDeletesButNoneCanBeDone = true;
Set<String> aliases = new HashSet<String>();
for (AliasActions action : actions) {
//expand indices
String[] concreteIndices = state.metaData().concreteIndices(action.indices(), request.indicesOptions());
//collect the aliases
for (String alias : action.aliases()) {
aliases.add(alias);
}
for (String index : concreteIndices) {
for (String alias : action.concreteAliases(state.metaData(), index)) {
AliasAction finalAction = new AliasAction(action.aliasAction());
finalAction.index(index);
finalAction.alias(alias);
finalActions.add(finalAction);
//if there is only delete requests, none will be added if the types do not map to any existing type
hasOnlyDeletesButNoneCanBeDone = false;
}
}
}
if (hasOnlyDeletesButNoneCanBeDone && actions.size() != 0) {
throw new AliasesMissingException(aliases.toArray(new String[aliases.size()]));
}
request.aliasActions().clear();
IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest() IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.actions(request.aliasActions().toArray(new AliasAction[request.aliasActions().size()])); .actions(finalActions.toArray(new AliasAction[finalActions.size()]));
indexAliasesService.indicesAliases(updateRequest, new ClusterStateUpdateListener() { indexAliasesService.indicesAliases(updateRequest, new ClusterStateUpdateListener() {
@Override @Override

View File

@ -26,7 +26,7 @@ import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest;
*/ */
public class DeleteMappingClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<DeleteMappingClusterStateUpdateRequest> { public class DeleteMappingClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<DeleteMappingClusterStateUpdateRequest> {
private String type; private String[] types;
DeleteMappingClusterStateUpdateRequest() { DeleteMappingClusterStateUpdateRequest() {
@ -35,15 +35,15 @@ public class DeleteMappingClusterStateUpdateRequest extends IndicesClusterStateU
/** /**
* Returns the type to be removed * Returns the type to be removed
*/ */
public String type() { public String[] types() {
return type; return types;
} }
/** /**
* Sets the type to be removed * Sets the type to be removed
*/ */
public DeleteMappingClusterStateUpdateRequest type(String type) { public DeleteMappingClusterStateUpdateRequest types(String[] types) {
this.type = type; this.types = types;
return this; return this;
} }
} }

View File

@ -23,8 +23,10 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.CollectionUtils;
import java.io.IOException; import java.io.IOException;
@ -37,7 +39,7 @@ public class DeleteMappingRequest extends AcknowledgedRequest<DeleteMappingReque
private String[] indices; private String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
private String type; private String[] types;
DeleteMappingRequest() { DeleteMappingRequest() {
} }
@ -53,8 +55,29 @@ public class DeleteMappingRequest extends AcknowledgedRequest<DeleteMappingReque
@Override @Override
public ActionRequestValidationException validate() { public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null; ActionRequestValidationException validationException = null;
if (type == null) { if (CollectionUtils.isEmpty(types)) {
validationException = addValidationError("mapping type is missing", validationException); validationException = addValidationError("mapping type is missing", validationException);
} else {
validationException = checkForEmptyString(validationException, types);
}
if (CollectionUtils.isEmpty(indices)) {
validationException = addValidationError("index is missing", validationException);
} else {
validationException = checkForEmptyString(validationException, indices);
}
return validationException;
}
private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String[] strings) {
boolean containsEmptyString = false;
for (String string : strings) {
if (!Strings.hasText(string)) {
containsEmptyString = true;
}
}
if (containsEmptyString) {
validationException = addValidationError("types must not contain empty strings", validationException);
} }
return validationException; return validationException;
} }
@ -84,52 +107,35 @@ public class DeleteMappingRequest extends AcknowledgedRequest<DeleteMappingReque
} }
/** /**
* The mapping type. * The mapping types.
*/ */
public String type() { public String[] types() {
return type; return types;
} }
/** /**
* The type of the mappings to remove. * The type of the mappings to remove.
*/ */
public DeleteMappingRequest type(String type) { public DeleteMappingRequest types(String... types) {
this.type = type; this.types = types;
return this; return this;
} }
@Override @Override
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) throws IOException {
super.readFrom(in); super.readFrom(in);
indices = new String[in.readVInt()]; indices = in.readStringArray();
for (int i = 0; i < indices.length; i++) {
indices[i] = in.readString();
}
indicesOptions = IndicesOptions.readIndicesOptions(in); indicesOptions = IndicesOptions.readIndicesOptions(in);
if (in.readBoolean()) { types = in.readStringArray();
type = in.readString();
}
readTimeout(in, Version.V_0_90_6); readTimeout(in, Version.V_0_90_6);
} }
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out); super.writeTo(out);
if (indices == null) { out.writeStringArrayNullable(indices);
out.writeVInt(0);
} else {
out.writeVInt(indices.length);
for (String index : indices) {
out.writeString(index);
}
}
indicesOptions.writeIndicesOptions(out); indicesOptions.writeIndicesOptions(out);
if (type == null) { out.writeStringArrayNullable(types);
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeString(type);
}
writeTimeout(out, Version.V_0_90_6); writeTimeout(out, Version.V_0_90_6);
} }
} }

View File

@ -45,8 +45,8 @@ public class DeleteMappingRequestBuilder extends AcknowledgedRequestBuilder<Dele
/** /**
* Sets the type of the mapping to remove * Sets the type of the mapping to remove
*/ */
public DeleteMappingRequestBuilder setType(String type) { public DeleteMappingRequestBuilder setType(String... types) {
request.type(type); request.types(types);
return this; return this;
} }

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.mapping.delete; package org.elasticsearch.action.admin.indices.mapping.delete;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.flush.FlushResponse;
@ -37,15 +38,23 @@ import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaDataMappingService; import org.elasticsearch.cluster.metadata.MetaDataMappingService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.FilterBuilders; import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.BoolFilterBuilder;
import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TypeFilterBuilder;
import org.elasticsearch.indices.TypeMissingException;
import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.HashSet;
import java.util.Set;
/** /**
* Delete mapping action. * Delete mapping action.
*/ */
@ -108,8 +117,26 @@ public class TransportDeleteMappingAction extends TransportMasterNodeOperationAc
flushAction.execute(Requests.flushRequest(request.indices()), new ActionListener<FlushResponse>() { flushAction.execute(Requests.flushRequest(request.indices()), new ActionListener<FlushResponse>() {
@Override @Override
public void onResponse(FlushResponse flushResponse) { public void onResponse(FlushResponse flushResponse) {
// get all types that need to be deleted.
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result = clusterService.state().metaData().findMappings(
request.indices(), request.types()
);
// create OrFilter with type filters within to account for different types
BoolFilterBuilder filterBuilder = new BoolFilterBuilder();
Set<String> types = new HashSet<String>();
for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> typesMeta : result) {
for (ObjectObjectCursor<String, MappingMetaData> type : typesMeta.value) {
filterBuilder.should(new TypeFilterBuilder(type.key));
types.add(type.key);
}
}
if (types.size() == 0) {
throw new TypeMissingException(new Index("_all"), request.types(), "No index has the type.");
}
request.types(types.toArray(new String[types.size()]));
QuerySourceBuilder querySourceBuilder = new QuerySourceBuilder() QuerySourceBuilder querySourceBuilder = new QuerySourceBuilder()
.setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), FilterBuilders.typeFilter(request.type()))); .setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), filterBuilder));
deleteByQueryAction.execute(Requests.deleteByQueryRequest(request.indices()).source(querySourceBuilder), new ActionListener<DeleteByQueryResponse>() { deleteByQueryAction.execute(Requests.deleteByQueryRequest(request.indices()).source(querySourceBuilder), new ActionListener<DeleteByQueryResponse>() {
@Override @Override
public void onResponse(DeleteByQueryResponse deleteByQueryResponse) { public void onResponse(DeleteByQueryResponse deleteByQueryResponse) {
@ -126,7 +153,7 @@ public class TransportDeleteMappingAction extends TransportMasterNodeOperationAc
protected void removeMapping() { protected void removeMapping() {
DeleteMappingClusterStateUpdateRequest clusterStateUpdateRequest = new DeleteMappingClusterStateUpdateRequest() DeleteMappingClusterStateUpdateRequest clusterStateUpdateRequest = new DeleteMappingClusterStateUpdateRequest()
.indices(request.indices()).type(request.type()) .indices(request.indices()).types(request.types())
.ackTimeout(request.timeout()) .ackTimeout(request.timeout())
.masterNodeTimeout(request.masterNodeTimeout()); .masterNodeTimeout(request.masterNodeTimeout());

View File

@ -71,6 +71,7 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
for (Map.Entry<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexEntry : mappings.entrySet()) { for (Map.Entry<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexEntry : mappings.entrySet()) {
builder.startObject(indexEntry.getKey(), XContentBuilder.FieldCaseConversion.NONE); builder.startObject(indexEntry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject("mappings");
for (Map.Entry<String, ImmutableMap<String, FieldMappingMetaData>> typeEntry : indexEntry.getValue().entrySet()) { for (Map.Entry<String, ImmutableMap<String, FieldMappingMetaData>> typeEntry : indexEntry.getValue().entrySet()) {
builder.startObject(typeEntry.getKey(), XContentBuilder.FieldCaseConversion.NONE); builder.startObject(typeEntry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
for (Map.Entry<String, FieldMappingMetaData> fieldEntry : typeEntry.getValue().entrySet()) { for (Map.Entry<String, FieldMappingMetaData> fieldEntry : typeEntry.getValue().entrySet()) {
@ -81,6 +82,7 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte
builder.endObject(); builder.endObject();
} }
builder.endObject(); builder.endObject();
builder.endObject();
} }
return builder; return builder;
} }

View File

@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.settings.get; package org.elasticsearch.action.admin.indices.settings.get;
import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeOperationRequest; import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
@ -34,7 +35,7 @@ public class GetSettingsRequest extends MasterNodeOperationRequest<GetSettingsRe
private String[] indices = Strings.EMPTY_ARRAY; private String[] indices = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true);
private String prefix; private String[] names = Strings.EMPTY_ARRAY;
public GetSettingsRequest indices(String... indices) { public GetSettingsRequest indices(String... indices) {
this.indices = indices; this.indices = indices;
@ -54,18 +55,22 @@ public class GetSettingsRequest extends MasterNodeOperationRequest<GetSettingsRe
return indicesOptions; return indicesOptions;
} }
public String prefix() { public String[] names() {
return prefix; return names;
} }
public GetSettingsRequest prefix(String prefix) { public GetSettingsRequest names(String... names) {
this.prefix = prefix; this.names = names;
return this; return this;
} }
@Override @Override
public ActionRequestValidationException validate() { public ActionRequestValidationException validate() {
return null; ActionRequestValidationException validationException = null;
if (names == null) {
validationException = ValidateActions.addValidationError("names may not be null", validationException);
}
return validationException;
} }
@Override @Override
@ -73,7 +78,7 @@ public class GetSettingsRequest extends MasterNodeOperationRequest<GetSettingsRe
super.readFrom(in); super.readFrom(in);
indices = in.readStringArray(); indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in); indicesOptions = IndicesOptions.readIndicesOptions(in);
prefix = in.readOptionalString(); names = in.readStringArray();
} }
@Override @Override
@ -81,6 +86,6 @@ public class GetSettingsRequest extends MasterNodeOperationRequest<GetSettingsRe
super.writeTo(out); super.writeTo(out);
out.writeStringArray(indices); out.writeStringArray(indices);
indicesOptions.writeIndicesOptions(out); indicesOptions.writeIndicesOptions(out);
out.writeOptionalString(prefix); out.writeStringArray(names);
} }
} }

View File

@ -54,8 +54,8 @@ public class GetSettingsRequestBuilder extends MasterNodeOperationRequestBuilder
return this; return this;
} }
public GetSettingsRequestBuilder setPrefix(String prefix) { public GetSettingsRequestBuilder setNames(String... names) {
request.prefix(prefix); request.names(names);
return this; return this;
} }

View File

@ -27,9 +27,11 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
@ -80,10 +82,10 @@ public class TransportGetSettingsAction extends TransportMasterNodeOperationActi
} }
Settings settings = settingsFilter.filterSettings(indexMetaData.settings()); Settings settings = settingsFilter.filterSettings(indexMetaData.settings());
if (request.prefix() != null) { if (!CollectionUtils.isEmpty(request.names())) {
ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder(); ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder();
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) { for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
if (entry.getKey().startsWith(request.prefix())) { if (Regex.simpleMatch(request.names(), entry.getKey())) {
settingsBuilder.put(entry.getKey(), entry.getValue()); settingsBuilder.put(entry.getKey(), entry.getValue());
} }
} }

View File

@ -26,15 +26,18 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.CollectionUtils;
import java.io.IOException; import java.io.IOException;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/** /**
* A request to delete an index warmer. * A request to delete an index warmer.
*/ */
public class DeleteWarmerRequest extends AcknowledgedRequest<DeleteWarmerRequest> { public class DeleteWarmerRequest extends AcknowledgedRequest<DeleteWarmerRequest> {
private String name; private String[] names = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
private String[] indices = Strings.EMPTY_ARRAY; private String[] indices = Strings.EMPTY_ARRAY;
@ -46,36 +49,60 @@ public class DeleteWarmerRequest extends AcknowledgedRequest<DeleteWarmerRequest
* *
* @param name: the name (or wildcard expression) of the warmer to match, null to delete all. * @param name: the name (or wildcard expression) of the warmer to match, null to delete all.
*/ */
public DeleteWarmerRequest(String name) { public DeleteWarmerRequest(String... names) {
this.name = name; names(names);
} }
@Override @Override
public ActionRequestValidationException validate() { public ActionRequestValidationException validate() {
return null; ActionRequestValidationException validationException = null;
if (CollectionUtils.isEmpty(names)) {
validationException = addValidationError("warmer names are missing", validationException);
} else {
validationException = checkForEmptyString(validationException, names);
}
if (CollectionUtils.isEmpty(indices)) {
validationException = addValidationError("indices are missing", validationException);
} else {
validationException = checkForEmptyString(validationException, indices);
}
return validationException;
}
private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String[] strings) {
boolean containsEmptyString = false;
for (String string : strings) {
if (!Strings.hasText(string)) {
containsEmptyString = true;
}
}
if (containsEmptyString) {
validationException = addValidationError("types must not contain empty strings", validationException);
}
return validationException;
} }
/** /**
* The name to delete. * The name to delete.
*/ */
@Nullable @Nullable
String name() { String[] names() {
return name; return names;
} }
/** /**
* The name (or wildcard expression) of the index warmer to delete, or null * The name (or wildcard expression) of the index warmer to delete, or null
* to delete all warmers. * to delete all warmers.
*/ */
public DeleteWarmerRequest name(@Nullable String name) { public DeleteWarmerRequest names(@Nullable String... names) {
this.name = name; this.names = names;
return this; return this;
} }
/** /**
* Sets the indices this put mapping operation will execute on. * Sets the indices this put mapping operation will execute on.
*/ */
public DeleteWarmerRequest indices(String[] indices) { public DeleteWarmerRequest indices(String... indices) {
this.indices = indices; this.indices = indices;
return this; return this;
} }
@ -99,7 +126,7 @@ public class DeleteWarmerRequest extends AcknowledgedRequest<DeleteWarmerRequest
@Override @Override
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) throws IOException {
super.readFrom(in); super.readFrom(in);
name = in.readOptionalString(); names = in.readStringArray();
indices = in.readStringArray(); indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in); indicesOptions = IndicesOptions.readIndicesOptions(in);
readTimeout(in, Version.V_0_90_6); readTimeout(in, Version.V_0_90_6);
@ -108,7 +135,7 @@ public class DeleteWarmerRequest extends AcknowledgedRequest<DeleteWarmerRequest
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out); super.writeTo(out);
out.writeOptionalString(name); out.writeStringArrayNullable(names);
out.writeStringArrayNullable(indices); out.writeStringArrayNullable(indices);
indicesOptions.writeIndicesOptions(out); indicesOptions.writeIndicesOptions(out);
writeTimeout(out, Version.V_0_90_6); writeTimeout(out, Version.V_0_90_6);

View File

@ -43,8 +43,8 @@ public class DeleteWarmerRequestBuilder extends AcknowledgedRequestBuilder<Delet
* The name (or wildcard expression) of the index warmer to delete, or null * The name (or wildcard expression) of the index warmer to delete, or null
* to delete all warmers. * to delete all warmers.
*/ */
public DeleteWarmerRequestBuilder setName(String name) { public DeleteWarmerRequestBuilder setNames(String... names) {
request.name(name); request.names(names);
return this; return this;
} }

View File

@ -42,6 +42,7 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.Arrays;
import java.util.List; import java.util.List;
/** /**
@ -89,7 +90,7 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeOperationAct
@Override @Override
protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener<DeleteWarmerResponse> listener) throws ElasticsearchException { protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener<DeleteWarmerResponse> listener) throws ElasticsearchException {
clusterService.submitStateUpdateTask("delete_warmer [" + request.name() + "]", new AckedClusterStateUpdateTask() { clusterService.submitStateUpdateTask("delete_warmer [" + Arrays.toString(request.names()) + "]", new AckedClusterStateUpdateTask() {
@Override @Override
public boolean mustAck(DiscoveryNode discoveryNode) { public boolean mustAck(DiscoveryNode discoveryNode) {
@ -118,7 +119,7 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeOperationAct
@Override @Override
public void onFailure(String source, Throwable t) { public void onFailure(String source, Throwable t) {
logger.debug("failed to delete warmer [{}] on indices [{}]", t, request.name(), request.indices()); logger.debug("failed to delete warmer [{}] on indices [{}]", t, Arrays.toString(request.names()), request.indices());
listener.onFailure(t); listener.onFailure(t);
} }
@ -136,10 +137,16 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeOperationAct
if (warmers != null) { if (warmers != null) {
List<IndexWarmersMetaData.Entry> entries = Lists.newArrayList(); List<IndexWarmersMetaData.Entry> entries = Lists.newArrayList();
for (IndexWarmersMetaData.Entry entry : warmers.entries()) { for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
if (request.name() == null || Regex.simpleMatch(request.name(), entry.name())) { boolean keepWarmer = true;
globalFoundAtLeastOne = true; for (String warmer : request.names()) {
// don't add it... if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals("_all")) {
} else { globalFoundAtLeastOne = true;
keepWarmer = false;
// don't add it...
break;
}
}
if (keepWarmer) {
entries.add(entry); entries.add(entry);
} }
} }
@ -153,11 +160,7 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeOperationAct
} }
if (!globalFoundAtLeastOne) { if (!globalFoundAtLeastOne) {
if (request.name() == null) { throw new IndexWarmerMissingException(request.names());
// full match, just return with no failure
return currentState;
}
throw new IndexWarmerMissingException(request.name());
} }
if (logger.isInfoEnabled()) { if (logger.isInfoEnabled()) {
@ -169,8 +172,10 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeOperationAct
IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE); IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (warmers != null) { if (warmers != null) {
for (IndexWarmersMetaData.Entry entry : warmers.entries()) { for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
if (Regex.simpleMatch(request.name(), entry.name())) { for (String warmer : request.names()) {
logger.info("[{}] delete warmer [{}]", index, entry.name()); if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals("_all")) {
logger.info("[{}] delete warmer [{}]", index, entry.name());
}
} }
} }
} }

View File

@ -20,7 +20,7 @@
package org.elasticsearch.action.support; package org.elasticsearch.action.support;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchInterruptedException; import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
@ -44,7 +44,8 @@ public abstract class AdapterActionFuture<T, L> extends BaseFuture<T> implements
try { try {
return get(); return get();
} catch (InterruptedException e) { } catch (InterruptedException e) {
throw new ElasticsearchInterruptedException(e.getMessage()); Thread.currentThread().interrupt();
throw new ElasticsearchIllegalStateException("Future got interrupted", e);
} catch (ExecutionException e) { } catch (ExecutionException e) {
throw rethrowExecutionException(e); throw rethrowExecutionException(e);
} }
@ -72,7 +73,8 @@ public abstract class AdapterActionFuture<T, L> extends BaseFuture<T> implements
} catch (TimeoutException e) { } catch (TimeoutException e) {
throw new ElasticsearchTimeoutException(e.getMessage()); throw new ElasticsearchTimeoutException(e.getMessage());
} catch (InterruptedException e) { } catch (InterruptedException e) {
throw new ElasticsearchInterruptedException(e.getMessage()); Thread.currentThread().interrupt();
throw new ElasticsearchIllegalStateException("Future got interrupted", e);
} catch (ExecutionException e) { } catch (ExecutionException e) {
throw rethrowExecutionException(e); throw rethrowExecutionException(e);
} }

View File

@ -82,7 +82,20 @@ public class AliasAction implements Streamable {
private AliasAction() { private AliasAction() {
} }
public AliasAction(AliasAction other) {
this.actionType = other.actionType;
this.index = other.index;
this.alias = other.alias;
this.filter = other.filter;
this.indexRouting = other.indexRouting;
this.searchRouting = other.searchRouting;
}
public AliasAction(Type actionType) {
this.actionType = actionType;
}
public AliasAction(Type actionType, String index, String alias) { public AliasAction(Type actionType, String index, String alias) {
this.actionType = actionType; this.actionType = actionType;
this.index = index; this.index = index;
@ -99,10 +112,20 @@ public class AliasAction implements Streamable {
public Type actionType() { public Type actionType() {
return actionType; return actionType;
} }
public AliasAction index(String index) {
this.index = index;
return this;
}
public String index() { public String index() {
return index; return index;
} }
public AliasAction alias(String alias) {
this.alias = alias;
return this;
}
public String alias() { public String alias() {
return alias; return alias;
@ -181,42 +204,21 @@ public class AliasAction implements Streamable {
@Override @Override
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) throws IOException {
actionType = Type.fromValue(in.readByte()); actionType = Type.fromValue(in.readByte());
index = in.readString(); index = in.readOptionalString();
alias = in.readString(); alias = in.readOptionalString();
if (in.readBoolean()) { filter = in.readOptionalString();
filter = in.readString(); indexRouting = in.readOptionalString();
} searchRouting = in.readOptionalString();
if (in.readBoolean()) {
indexRouting = in.readString();
}
if (in.readBoolean()) {
searchRouting = in.readString();
}
} }
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
out.writeByte(actionType.value()); out.writeByte(actionType.value());
out.writeString(index); out.writeOptionalString(index);
out.writeString(alias); out.writeOptionalString(alias);
if (filter == null) { out.writeOptionalString(filter);
out.writeBoolean(false); out.writeOptionalString(indexRouting);
} else { out.writeOptionalString(searchRouting);
out.writeBoolean(true);
out.writeString(filter);
}
if (indexRouting == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeString(indexRouting);
}
if (searchRouting == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeString(searchRouting);
}
} }
public static AliasAction newAddAliasAction(String index, String alias) { public static AliasAction newAddAliasAction(String index, String alias) {

View File

@ -279,7 +279,7 @@ public class MetaData implements Iterable<IndexMetaData> {
return ImmutableOpenMap.of(); return ImmutableOpenMap.of();
} }
boolean matchAllAliases = aliases.length == 0; boolean matchAllAliases = matchAllAliases(aliases);
ImmutableOpenMap.Builder<String, ImmutableList<AliasMetaData>> mapBuilder = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder<String, ImmutableList<AliasMetaData>> mapBuilder = ImmutableOpenMap.builder();
Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys()); Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys());
for (String index : intersection) { for (String index : intersection) {
@ -298,6 +298,15 @@ public class MetaData implements Iterable<IndexMetaData> {
} }
return mapBuilder.build(); return mapBuilder.build();
} }
private boolean matchAllAliases(final String[] aliases) {
for (String alias : aliases) {
if (alias.equals("_all")) {
return true;
}
}
return aliases.length == 0;
}
/** /**
* Checks if at least one of the specified aliases exists in the specified concrete indices. Wildcards are supported in the * Checks if at least one of the specified aliases exists in the specified concrete indices. Wildcards are supported in the
@ -331,6 +340,12 @@ public class MetaData implements Iterable<IndexMetaData> {
return false; return false;
} }
/*
* Finds all mappings for types and concrete indices. Types are expanded to
* include all types that match the glob patterns in the types array. Empty
* types array, null or {"_all"} will be expanded to all types available for
* the given indices.
*/
public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(String[] concreteIndices, final String[] types) { public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(String[] concreteIndices, final String[] types) {
assert types != null; assert types != null;
assert concreteIndices != null; assert concreteIndices != null;
@ -343,7 +358,7 @@ public class MetaData implements Iterable<IndexMetaData> {
for (String index : intersection) { for (String index : intersection) {
IndexMetaData indexMetaData = indices.get(index); IndexMetaData indexMetaData = indices.get(index);
ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings; ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings;
if (types.length == 0) { if (isAllTypes(types)) {
indexMapBuilder.put(index, indexMetaData.getMappings()); // No types specified means get it all indexMapBuilder.put(index, indexMetaData.getMappings()); // No types specified means get it all
} else { } else {
@ -361,12 +376,14 @@ public class MetaData implements Iterable<IndexMetaData> {
return indexMapBuilder.build(); return indexMapBuilder.build();
} }
public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> findWarmers(String[] concreteIndices, final String[] types, final String[] warmers) { public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) {
assert warmers != null; assert uncheckedWarmers != null;
assert concreteIndices != null; assert concreteIndices != null;
if (concreteIndices.length == 0) { if (concreteIndices.length == 0) {
return ImmutableOpenMap.of(); return ImmutableOpenMap.of();
} }
// special _all check to behave the same like not specifying anything for the warmers (not for the indices)
final String[] warmers = Strings.isAllOrWildcard(uncheckedWarmers) ? Strings.EMPTY_ARRAY : uncheckedWarmers;
ImmutableOpenMap.Builder<String, ImmutableList<IndexWarmersMetaData.Entry>> mapBuilder = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder<String, ImmutableList<IndexWarmersMetaData.Entry>> mapBuilder = ImmutableOpenMap.builder();
Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys()); Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys());
@ -945,7 +962,18 @@ public class MetaData implements Iterable<IndexMetaData> {
* @return true if the provided array maps to all indices, false otherwise * @return true if the provided array maps to all indices, false otherwise
*/ */
public boolean isAllIndices(String[] aliasesOrIndices) { public boolean isAllIndices(String[] aliasesOrIndices) {
return aliasesOrIndices == null || aliasesOrIndices.length == 0 || isExplicitAllIndices(aliasesOrIndices); return aliasesOrIndices == null || aliasesOrIndices.length == 0 || isExplicitAllPattern(aliasesOrIndices);
}
/**
* Identifies whether the array containing type names given as argument refers to all types
* The empty or null array identifies all types
*
* @param types the array containing index names
* @return true if the provided array maps to all indices, false otherwise
*/
public boolean isAllTypes(String[] types) {
return types == null || types.length == 0 || isExplicitAllPattern(types);
} }
/** /**
@ -955,7 +983,7 @@ public class MetaData implements Iterable<IndexMetaData> {
* @param aliasesOrIndices the array containing index names * @param aliasesOrIndices the array containing index names
* @return true if the provided array explicitly maps to all indices, false otherwise * @return true if the provided array explicitly maps to all indices, false otherwise
*/ */
public boolean isExplicitAllIndices(String[] aliasesOrIndices) { public boolean isExplicitAllPattern(String[] aliasesOrIndices) {
return aliasesOrIndices != null && aliasesOrIndices.length == 1 && "_all".equals(aliasesOrIndices[0]); return aliasesOrIndices != null && aliasesOrIndices.length == 1 && "_all".equals(aliasesOrIndices[0]);
} }

View File

@ -385,7 +385,7 @@ public class MetaDataMappingService extends AbstractComponent {
} }
public void removeMapping(final DeleteMappingClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) { public void removeMapping(final DeleteMappingClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
clusterService.submitStateUpdateTask("remove-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask() { clusterService.submitStateUpdateTask("remove-mapping [" + Arrays.toString(request.types()) + "]", Priority.HIGH, new AckedClusterStateUpdateTask() {
@Override @Override
public boolean mustAck(DiscoveryNode discoveryNode) { public boolean mustAck(DiscoveryNode discoveryNode) {
@ -428,21 +428,30 @@ public class MetaDataMappingService extends AbstractComponent {
String latestIndexWithout = null; String latestIndexWithout = null;
for (String indexName : request.indices()) { for (String indexName : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(indexName); IndexMetaData indexMetaData = currentState.metaData().index(indexName);
IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData);
if (indexMetaData != null) { if (indexMetaData != null) {
if (indexMetaData.mappings().containsKey(request.type())) { boolean isLatestIndexWithout = true;
builder.put(IndexMetaData.builder(indexMetaData).removeMapping(request.type())); for (String type : request.types()) {
changed = true; if (indexMetaData.mappings().containsKey(type)) {
} else { indexBuilder.removeMapping(type);
changed = true;
isLatestIndexWithout = false;
}
}
if (isLatestIndexWithout) {
latestIndexWithout = indexMetaData.index(); latestIndexWithout = indexMetaData.index();
} }
} }
builder.put(indexBuilder);
} }
if (!changed) { if (!changed) {
throw new TypeMissingException(new Index(latestIndexWithout), request.type()); throw new TypeMissingException(new Index(latestIndexWithout), request.types());
} }
logger.info("[{}] remove_mapping [{}]", request.indices(), request.type()); logger.info("[{}] remove_mapping [{}]", request.indices(), request.types());
return ClusterState.builder(currentState).metaData(builder).build(); return ClusterState.builder(currentState).metaData(builder).build();
} }

View File

@ -203,7 +203,8 @@ public class SnapshotMetaData implements MetaData.Custom {
STARTED((byte) 1), STARTED((byte) 1),
SUCCESS((byte) 2), SUCCESS((byte) 2),
FAILED((byte) 3), FAILED((byte) 3),
ABORTED((byte) 4); ABORTED((byte) 4),
MISSING((byte) 5);
private byte value; private byte value;
@ -216,7 +217,43 @@ public class SnapshotMetaData implements MetaData.Custom {
} }
public boolean completed() { public boolean completed() {
return this == SUCCESS || this == FAILED; switch (this) {
case INIT:
return false;
case STARTED:
return false;
case SUCCESS:
return true;
case FAILED:
return true;
case ABORTED:
return false;
case MISSING:
return true;
default:
assert false;
return true;
}
}
public boolean failed() {
switch (this) {
case INIT:
return false;
case STARTED:
return false;
case SUCCESS:
return false;
case FAILED:
return true;
case ABORTED:
return true;
case MISSING:
return true;
default:
assert false;
return false;
}
} }
public static State fromValue(byte value) { public static State fromValue(byte value) {
@ -231,6 +268,8 @@ public class SnapshotMetaData implements MetaData.Custom {
return FAILED; return FAILED;
case 4: case 4:
return ABORTED; return ABORTED;
case 5:
return MISSING;
default: default:
throw new ElasticsearchIllegalArgumentException("No snapshot state for value [" + value + "]"); throw new ElasticsearchIllegalArgumentException("No snapshot state for value [" + value + "]");
} }

View File

@ -26,6 +26,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil; import org.apache.lucene.util.UnicodeUtil;
import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.util.CollectionUtils;
import java.io.BufferedReader; import java.io.BufferedReader;
import java.io.IOException; import java.io.IOException;
@ -1565,4 +1566,14 @@ public class Strings {
return s.substring(beginIndex, endIndex); return s.substring(beginIndex, endIndex);
} }
} }
/**
* If an array only consists of zero or one element, which is "*" or "_all" return an empty array
* which is usually used as everything
*/
public static boolean isAllOrWildcard(String[] data) {
return CollectionUtils.isEmpty(data) ||
data.length == 1 && ("_all".equals(data[0]) || "*".equals(data[0]));
}
} }

View File

@ -187,5 +187,16 @@ public enum CollectionUtils {
} }
return uniqueCount; return uniqueCount;
} }
/**
* Checks if the given array contains any elements.
*
* @param array The array to check
*
* @return false if the array contains an element, true if not or the array is null.
*/
public static boolean isEmpty(Object[] array) {
return array == null || array.length == 0;
}
} }

View File

@ -20,7 +20,6 @@
package org.elasticsearch.common.util.concurrent; package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.ElasticsearchInterruptedException;
import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.CounterMetric;
import java.util.concurrent.BlockingQueue; import java.util.concurrent.BlockingQueue;
@ -44,7 +43,8 @@ public class EsAbortPolicy implements XRejectedExecutionHandler {
try { try {
((SizeBlockingQueue) queue).forcePut(r); ((SizeBlockingQueue) queue).forcePut(r);
} catch (InterruptedException e) { } catch (InterruptedException e) {
throw new ElasticsearchInterruptedException(e.getMessage(), e); Thread.currentThread().interrupt();
throw new ElasticsearchIllegalStateException("forced execution, but got interrupted", e);
} }
return; return;
} }

View File

@ -154,24 +154,18 @@ public class XContentMapValues {
} }
sb.append(key); sb.append(key);
String path = sb.toString(); String path = sb.toString();
boolean excluded = false;
for (String exclude : excludes) { if (Regex.simpleMatch(excludes, path)) {
if (Regex.simpleMatch(exclude, path)) {
excluded = true;
break;
}
}
if (excluded) {
sb.setLength(mark); sb.setLength(mark);
continue; continue;
} }
boolean exactIncludeMatch;
boolean exactIncludeMatch = false; // true if the current position was specifically mentioned
boolean pathIsPrefixOfAnInclude = false; // true if potentially a sub scope can be included
if (includes.length == 0) { if (includes.length == 0) {
// implied match anything // implied match anything
exactIncludeMatch = true; exactIncludeMatch = true;
} else { } else {
exactIncludeMatch = false;
boolean pathIsPrefixOfAnInclude = false;
for (String include : includes) { for (String include : includes) {
// check for prefix matches as well to see if we need to zero in, something like: obj1.arr1.* or *.field // check for prefix matches as well to see if we need to zero in, something like: obj1.arr1.* or *.field
// note, this does not work well with middle matches, like obj1.*.obj3 // note, this does not work well with middle matches, like obj1.*.obj3
@ -198,11 +192,12 @@ public class XContentMapValues {
break; break;
} }
} }
if (!pathIsPrefixOfAnInclude && !exactIncludeMatch) { }
// skip subkeys, not interesting.
sb.setLength(mark); if (!(pathIsPrefixOfAnInclude || exactIncludeMatch)) {
continue; // skip subkeys, not interesting.
} sb.setLength(mark);
continue;
} }
@ -210,7 +205,7 @@ public class XContentMapValues {
Map<String, Object> innerInto = Maps.newHashMap(); Map<String, Object> innerInto = Maps.newHashMap();
// if we had an exact match, we want give deeper excludes their chance // if we had an exact match, we want give deeper excludes their chance
filter((Map<String, Object>) entry.getValue(), innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb); filter((Map<String, Object>) entry.getValue(), innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb);
if (!innerInto.isEmpty()) { if (exactIncludeMatch || !innerInto.isEmpty()) {
into.put(entry.getKey(), innerInto); into.put(entry.getKey(), innerInto);
} }
} else if (entry.getValue() instanceof List) { } else if (entry.getValue() instanceof List) {

View File

@ -239,6 +239,8 @@ public class DocumentMapper implements ToXContent {
} }
}; };
public static final String ALLOW_TYPE_WRAPPER = "index.mapping.allow_type_wrapper";
private final String index; private final String index;
private final Settings indexSettings; private final Settings indexSettings;
@ -494,18 +496,15 @@ public class DocumentMapper implements ToXContent {
} else if (token != XContentParser.Token.FIELD_NAME) { } else if (token != XContentParser.Token.FIELD_NAME) {
throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist"); throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist");
} }
if (type.equals(parser.currentName())) { // first field is the same as the type, this might be because the
// first field is the same as the type, this might be because the type is provided, and the object exists within it // type is provided, and the object exists within it or because
// or because there is a valid field that by chance is named as the type // there is a valid field that by chance is named as the type.
// Because of this, by default wrapping a document in a type is
// Note, in this case, we only handle plain value types, an object type will be analyzed as if it was the type itself // disabled, but can be enabled by setting
// and other same level fields will be ignored // index.mapping.allow_type_wrapper to true
token = parser.nextToken(); if (type.equals(parser.currentName()) && indexSettings.getAsBoolean(ALLOW_TYPE_WRAPPER, false)) {
parser.nextToken();
countDownTokens++; countDownTokens++;
// commented out, allow for same type with START_OBJECT, we do our best to handle it except for the above corner case
// if (token != XContentParser.Token.START_OBJECT) {
// throw new MapperException("Malformed content, a field with the same name as the type must be an object with the properties/fields within it");
// }
} }
for (RootMapper rootMapper : rootMappersOrdered) { for (RootMapper rootMapper : rootMappersOrdered) {

View File

@ -24,7 +24,6 @@ import com.google.common.collect.ImmutableSet;
import com.google.common.collect.UnmodifiableIterator; import com.google.common.collect.UnmodifiableIterator;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.ElasticsearchInterruptedException;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.*; import org.elasticsearch.common.inject.*;
@ -276,7 +275,8 @@ public class InternalIndexService extends AbstractIndexComponent implements Inde
try { try {
latch.await(); latch.await();
} catch (InterruptedException e) { } catch (InterruptedException e) {
throw new ElasticsearchInterruptedException("interrupted closing index [ " + index().name() + "]", e); logger.debug("Interrupted closing index [{}]", e, index().name());
Thread.currentThread().interrupt();
} }
} }

View File

@ -23,17 +23,19 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexException; import org.elasticsearch.index.IndexException;
import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestStatus;
import java.util.Arrays;
/** /**
* *
*/ */
public class TypeMissingException extends IndexException { public class TypeMissingException extends IndexException {
public TypeMissingException(Index index, String type) { public TypeMissingException(Index index, String... types) {
super(index, "type[" + type + "] missing"); super(index, "type[" + Arrays.toString(types) + "] missing");
} }
public TypeMissingException(Index index, String type, String message) { public TypeMissingException(Index index, String[] types, String message) {
super(index, "type[" + type + "] missing: " + message); super(index, "type[" + Arrays.toString(types) + "] missing: " + message);
} }

View File

@ -158,4 +158,13 @@ public abstract class RestRequest implements ToXContent.Params {
} }
return Strings.splitStringByCommaToArray(value); return Strings.splitStringByCommaToArray(value);
} }
public String[] paramAsStringArrayOrEmptyIfAll(String key) {
String[] params = paramAsStringArray(key, Strings.EMPTY_ARRAY);
if (Strings.isAllOrWildcard(params)) {
return Strings.EMPTY_ARRAY;
}
return params;
}
} }

View File

@ -16,21 +16,31 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
package org.elasticsearch.rest.action.admin.indices.alias.delete;
package org.elasticsearch; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.rest.RestStatus;
import java.util.Arrays;
/** /**
* The same as {@link InterruptedException} simply a runtime one.
*
* *
*/ */
public class ElasticsearchInterruptedException extends ElasticsearchException { public class AliasesMissingException extends ElasticsearchException {
public ElasticsearchInterruptedException(String message) { private final String[] names;
super(message);
public AliasesMissingException(String... names) {
super("aliases [" + Arrays.toString(names) + "] missing");
this.names = names;
} }
public ElasticsearchInterruptedException(String message, Throwable cause) { public String[] names() {
super(message, cause); return this.names;
}
@Override
public RestStatus status() {
return RestStatus.NOT_FOUND;
} }
} }

View File

@ -21,6 +21,7 @@ package org.elasticsearch.rest.action.admin.indices.alias.delete;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.*; import org.elasticsearch.rest.*;
@ -35,15 +36,16 @@ public class RestIndexDeleteAliasesAction extends BaseRestHandler {
public RestIndexDeleteAliasesAction(Settings settings, Client client, RestController controller) { public RestIndexDeleteAliasesAction(Settings settings, Client client, RestController controller) {
super(settings, client); super(settings, client);
controller.registerHandler(DELETE, "/{index}/_alias/{name}", this); controller.registerHandler(DELETE, "/{index}/_alias/{name}", this);
controller.registerHandler(DELETE, "/{index}/_aliases/{name}", this);
} }
@Override @Override
public void handleRequest(final RestRequest request, final RestChannel channel) { public void handleRequest(final RestRequest request, final RestChannel channel) {
final String index = request.param("index"); final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
String alias = request.param("name"); final String[] aliases = Strings.splitStringByCommaToArray(request.param("name"));
IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout())); indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout()));
indicesAliasesRequest.removeAlias(index, alias); indicesAliasesRequest.removeAlias(indices, aliases);
indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout()));
client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestResponseActionListener<IndicesAliasesResponse>(request, channel, logger)); client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestResponseActionListener<IndicesAliasesResponse>(request, channel, logger));

View File

@ -49,6 +49,7 @@ public class RestGetAliasesAction extends BaseRestHandler {
@Inject @Inject
public RestGetAliasesAction(Settings settings, Client client, RestController controller) { public RestGetAliasesAction(Settings settings, Client client, RestController controller) {
super(settings, client); super(settings, client);
controller.registerHandler(GET, "/_alias/", this);
controller.registerHandler(GET, "/_alias/{name}", this); controller.registerHandler(GET, "/_alias/{name}", this);
controller.registerHandler(GET, "/{index}/_alias/{name}", this); controller.registerHandler(GET, "/{index}/_alias/{name}", this);
controller.registerHandler(GET, "/{index}/_alias", this); controller.registerHandler(GET, "/{index}/_alias", this);
@ -56,7 +57,7 @@ public class RestGetAliasesAction extends BaseRestHandler {
@Override @Override
public void handleRequest(final RestRequest request, final RestChannel channel) { public void handleRequest(final RestRequest request, final RestChannel channel) {
String[] aliases = request.paramAsStringArray("name", Strings.EMPTY_ARRAY); final String[] aliases = request.paramAsStringArrayOrEmptyIfAll("name");
final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases); final GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases);
getAliasesRequest.indices(indices); getAliasesRequest.indices(indices);
@ -68,7 +69,11 @@ public class RestGetAliasesAction extends BaseRestHandler {
public void onResponse(GetAliasesResponse response) { public void onResponse(GetAliasesResponse response) {
try { try {
XContentBuilder builder = RestXContentBuilder.restContentBuilder(request); XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
if (response.getAliases().isEmpty()) { // empty body, if indices were specified but no aliases were
if (indices.length > 0 && response.getAliases().isEmpty()) {
channel.sendResponse(new XContentRestResponse(request, OK, RestXContentBuilder.emptyBuilder(request)));
return;
} else if (response.getAliases().isEmpty()) {
String message = String.format(Locale.ROOT, "alias [%s] missing", toNamesString(getAliasesRequest.aliases())); String message = String.format(Locale.ROOT, "alias [%s] missing", toNamesString(getAliasesRequest.aliases()));
builder.startObject() builder.startObject()
.field("error", message) .field("error", message)

View File

@ -29,6 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
@ -37,6 +38,7 @@ import org.elasticsearch.rest.action.support.RestXContentBuilder;
import java.io.IOException; import java.io.IOException;
import static org.elasticsearch.common.Strings.isAllOrWildcard;
import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.RestStatus.OK;
@ -50,16 +52,19 @@ public class RestGetIndicesAliasesAction extends BaseRestHandler {
super(settings, client); super(settings, client);
controller.registerHandler(GET, "/_aliases", this); controller.registerHandler(GET, "/_aliases", this);
controller.registerHandler(GET, "/{index}/_aliases", this); controller.registerHandler(GET, "/{index}/_aliases", this);
controller.registerHandler(GET, "/{index}/_aliases/{name}", this);
controller.registerHandler(GET, "/_aliases/{name}", this);
} }
@Override @Override
public void handleRequest(final RestRequest request, final RestChannel channel) { public void handleRequest(final RestRequest request, final RestChannel channel) {
final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final String[] aliases = Strings.splitStringByCommaToArray(request.param("name"));
ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest() ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest()
.routingTable(false) .routingTable(false)
.nodes(false) .nodes(false)
.indices(indices); .indices(indices);
clusterStateRequest.listenerThreaded(false); clusterStateRequest.listenerThreaded(false);
@ -71,20 +76,22 @@ public class RestGetIndicesAliasesAction extends BaseRestHandler {
XContentBuilder builder = RestXContentBuilder.restContentBuilder(request); XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
builder.startObject(); builder.startObject();
final boolean isAllAliasesRequested = isAllOrWildcard(aliases);
for (IndexMetaData indexMetaData : metaData) { for (IndexMetaData indexMetaData : metaData) {
builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE); builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject("aliases"); builder.startObject("aliases");
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
AliasMetaData.Builder.toXContent(cursor.value, builder, ToXContent.EMPTY_PARAMS);
}
builder.endObject();
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
if (isAllAliasesRequested || Regex.simpleMatch(aliases, cursor.value.alias())) {
AliasMetaData.Builder.toXContent(cursor.value, builder, ToXContent.EMPTY_PARAMS);
}
}
builder.endObject();
builder.endObject(); builder.endObject();
} }
builder.endObject(); builder.endObject();
channel.sendResponse(new XContentRestResponse(request, OK, builder)); channel.sendResponse(new XContentRestResponse(request, OK, builder));
} catch (Throwable e) { } catch (Throwable e) {
onFailure(e); onFailure(e);
@ -101,4 +108,5 @@ public class RestGetIndicesAliasesAction extends BaseRestHandler {
} }
}); });
} }
} }

View File

@ -20,9 +20,11 @@ package org.elasticsearch.rest.action.admin.indices.alias.put;
import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentFactory;
@ -32,6 +34,7 @@ import org.elasticsearch.rest.*;
import java.io.IOException; import java.io.IOException;
import java.util.Map; import java.util.Map;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT; import static org.elasticsearch.rest.RestRequest.Method.PUT;
/** /**
@ -43,13 +46,22 @@ public class RestIndexPutAliasAction extends BaseRestHandler {
super(settings, client); super(settings, client);
controller.registerHandler(PUT, "/{index}/_alias/{name}", this); controller.registerHandler(PUT, "/{index}/_alias/{name}", this);
controller.registerHandler(PUT, "/_alias/{name}", this); controller.registerHandler(PUT, "/_alias/{name}", this);
controller.registerHandler(PUT, "/{index}/_aliases/{name}", this);
controller.registerHandler(PUT, "/_aliases/{name}", this);
controller.registerHandler(PUT, "/{index}/_alias", this); controller.registerHandler(PUT, "/{index}/_alias", this);
controller.registerHandler(PUT, "/_alias", this); controller.registerHandler(PUT, "/_alias", this);
controller.registerHandler(POST, "/{index}/_alias/{name}", this);
controller.registerHandler(POST, "/_alias/{name}", this);
controller.registerHandler(POST, "/{index}/_aliases/{name}", this);
controller.registerHandler(POST, "/_aliases/{name}", this);
controller.registerHandler(PUT, "/{index}/_aliases", this);
//we cannot add POST for "/_aliases" because this is the _aliases api already defined in RestIndicesAliasesAction
} }
@Override @Override
public void handleRequest(final RestRequest request, final RestChannel channel) { public void handleRequest(final RestRequest request, final RestChannel channel) {
String index = request.param("index"); String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
String alias = request.param("name"); String alias = request.param("name");
Map<String, Object> filter = null; Map<String, Object> filter = null;
String routing = null; String routing = null;
@ -70,7 +82,7 @@ public class RestIndexPutAliasAction extends BaseRestHandler {
currentFieldName = parser.currentName(); currentFieldName = parser.currentName();
} else if (token.isValue()) { } else if (token.isValue()) {
if ("index".equals(currentFieldName)) { if ("index".equals(currentFieldName)) {
index = parser.text(); indices = Strings.splitStringByCommaToArray(parser.text());
} else if ("alias".equals(currentFieldName)) { } else if ("alias".equals(currentFieldName)) {
alias = parser.text(); alias = parser.text();
} else if ("routing".equals(currentFieldName)) { } else if ("routing".equals(currentFieldName)) {
@ -102,9 +114,11 @@ public class RestIndexPutAliasAction extends BaseRestHandler {
IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout())); indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout()));
AliasAction aliasAction = new AliasAction(AliasAction.Type.ADD, index, alias); String[] aliases = new String[] {alias};
IndicesAliasesRequest.AliasActions aliasAction = new AliasActions(AliasAction.Type.ADD, indices, aliases);
indicesAliasesRequest.addAliasAction(aliasAction); indicesAliasesRequest.addAliasAction(aliasAction);
indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout()));
if (routing != null) { if (routing != null) {
aliasAction.routing(routing); aliasAction.routing(routing);

View File

@ -41,13 +41,18 @@ public class RestDeleteMappingAction extends BaseRestHandler {
super(settings, client); super(settings, client);
controller.registerHandler(DELETE, "/{index}/{type}/_mapping", this); controller.registerHandler(DELETE, "/{index}/{type}/_mapping", this);
controller.registerHandler(DELETE, "/{index}/{type}", this); controller.registerHandler(DELETE, "/{index}/{type}", this);
controller.registerHandler(DELETE, "/{index}/_mapping/{type}", this);
//support _mappings also
controller.registerHandler(DELETE, "/{index}/{type}/_mappings", this);
controller.registerHandler(DELETE, "/{index}/_mappings/{type}", this);
} }
@Override @Override
public void handleRequest(final RestRequest request, final RestChannel channel) { public void handleRequest(final RestRequest request, final RestChannel channel) {
DeleteMappingRequest deleteMappingRequest = deleteMappingRequest(Strings.splitStringByCommaToArray(request.param("index"))); DeleteMappingRequest deleteMappingRequest = deleteMappingRequest(Strings.splitStringByCommaToArray(request.param("index")));
deleteMappingRequest.listenerThreaded(false); deleteMappingRequest.listenerThreaded(false);
deleteMappingRequest.type(request.param("type")); deleteMappingRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
deleteMappingRequest.timeout(request.paramAsTime("timeout", deleteMappingRequest.timeout())); deleteMappingRequest.timeout(request.paramAsTime("timeout", deleteMappingRequest.timeout()));
deleteMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteMappingRequest.masterNodeTimeout())); deleteMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteMappingRequest.masterNodeTimeout()));
deleteMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteMappingRequest.indicesOptions())); deleteMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteMappingRequest.indicesOptions()));

View File

@ -49,17 +49,18 @@ public class RestGetFieldMappingAction extends BaseRestHandler {
public RestGetFieldMappingAction(Settings settings, Client client, RestController controller) { public RestGetFieldMappingAction(Settings settings, Client client, RestController controller) {
super(settings, client); super(settings, client);
controller.registerHandler(GET, "/_mapping/field/{fields}", this); controller.registerHandler(GET, "/_mapping/field/{fields}", this);
controller.registerHandler(GET, "/_mapping/{type}/field/{fields}", this);
controller.registerHandler(GET, "/{index}/_mapping/field/{fields}", this); controller.registerHandler(GET, "/{index}/_mapping/field/{fields}", this);
controller.registerHandler(GET, "/{index}/{type}/_mapping/field/{fields}", this); controller.registerHandler(GET, "/{index}/{type}/_mapping/field/{fields}", this);
controller.registerHandler(GET, "/{index}/_mapping/{type}/field/{fields}", this);
} }
@Override @Override
public void handleRequest(final RestRequest request, final RestChannel channel) { public void handleRequest(final RestRequest request, final RestChannel channel) {
final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final String[] types = Strings.splitStringByCommaToArray(request.param("type")); final String[] types = request.paramAsStringArrayOrEmptyIfAll("type");
boolean local = request.paramAsBooleanOptional("local", false); boolean local = request.paramAsBooleanOptional("local", false);
final String[] fields = Strings.splitStringByCommaToArray(request.param("fields")); final String[] fields = Strings.splitStringByCommaToArray(request.param("fields"));
GetFieldMappingsRequest getMappingsRequest = new GetFieldMappingsRequest(); GetFieldMappingsRequest getMappingsRequest = new GetFieldMappingsRequest();
getMappingsRequest.indices(indices).types(types).local(local).fields(fields).includeDefaults(request.paramAsBoolean("include_defaults", false)); getMappingsRequest.indices(indices).types(types).local(local).fields(fields).includeDefaults(request.paramAsBoolean("include_defaults", false));
getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions())); getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions()));

View File

@ -31,6 +31,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.indices.TypeMissingException;
@ -53,12 +54,15 @@ public class RestGetMappingAction extends BaseRestHandler {
controller.registerHandler(GET, "/_mapping", this); controller.registerHandler(GET, "/_mapping", this);
controller.registerHandler(GET, "/{index}/_mapping", this); controller.registerHandler(GET, "/{index}/_mapping", this);
controller.registerHandler(GET, "/{index}/{type}/_mapping", this); controller.registerHandler(GET, "/{index}/{type}/_mapping", this);
controller.registerHandler(GET, "/{index}/_mappings/{type}", this);
controller.registerHandler(GET, "/{index}/_mapping/{type}", this);
controller.registerHandler(GET, "/_mapping/{type}", this);
} }
@Override @Override
public void handleRequest(final RestRequest request, final RestChannel channel) { public void handleRequest(final RestRequest request, final RestChannel channel) {
final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final String[] types = Strings.splitStringByCommaToArray(request.param("type")); final String[] types = request.paramAsStringArrayOrEmptyIfAll("type");
boolean local = request.paramAsBooleanOptional("local", false); boolean local = request.paramAsBooleanOptional("local", false);
GetMappingsRequest getMappingsRequest = new GetMappingsRequest(); GetMappingsRequest getMappingsRequest = new GetMappingsRequest();
getMappingsRequest.indices(indices).types(types).local(local); getMappingsRequest.indices(indices).types(types).local(local);
@ -74,7 +78,7 @@ public class RestGetMappingAction extends BaseRestHandler {
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsByIndex = response.getMappings(); ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsByIndex = response.getMappings();
if (mappingsByIndex.isEmpty()) { if (mappingsByIndex.isEmpty()) {
if (indices.length != 0 && types.length != 0) { if (indices.length != 0 && types.length != 0) {
channel.sendResponse(new XContentThrowableRestResponse(request, new TypeMissingException(new Index(indices[0]), types[0]))); channel.sendResponse(new XContentRestResponse(request, OK, RestXContentBuilder.emptyBuilder(request)));
} else if (indices.length != 0) { } else if (indices.length != 0) {
channel.sendResponse(new XContentThrowableRestResponse(request, new IndexMissingException(new Index(indices[0])))); channel.sendResponse(new XContentThrowableRestResponse(request, new IndexMissingException(new Index(indices[0]))));
} else if (types.length != 0) { } else if (types.length != 0) {
@ -87,12 +91,17 @@ public class RestGetMappingAction extends BaseRestHandler {
} }
for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappingsByIndex) { for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappingsByIndex) {
if (indexEntry.value.isEmpty()) {
continue;
}
builder.startObject(indexEntry.key, XContentBuilder.FieldCaseConversion.NONE); builder.startObject(indexEntry.key, XContentBuilder.FieldCaseConversion.NONE);
builder.startObject(Fields.MAPPINGS);
for (ObjectObjectCursor<String, MappingMetaData> typeEntry : indexEntry.value) { for (ObjectObjectCursor<String, MappingMetaData> typeEntry : indexEntry.value) {
builder.field(typeEntry.key); builder.field(typeEntry.key);
builder.map(typeEntry.value.sourceAsMap()); builder.map(typeEntry.value.sourceAsMap());
} }
builder.endObject(); builder.endObject();
builder.endObject();
} }
builder.endObject(); builder.endObject();
@ -112,4 +121,8 @@ public class RestGetMappingAction extends BaseRestHandler {
} }
}); });
} }
static class Fields {
static final XContentBuilderString MAPPINGS = new XContentBuilderString("mappings");
}
} }

View File

@ -37,14 +37,30 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT;
*/ */
public class RestPutMappingAction extends BaseRestHandler { public class RestPutMappingAction extends BaseRestHandler {
@Inject @Inject
public RestPutMappingAction(Settings settings, Client client, RestController controller) { public RestPutMappingAction(Settings settings, Client client, RestController controller) {
super(settings, client); super(settings, client);
controller.registerHandler(PUT, "/{index}/_mapping", this); controller.registerHandler(PUT, "/{index}/_mapping/", this);
controller.registerHandler(PUT, "/{index}/{type}/_mapping", this); controller.registerHandler(PUT, "/{index}/{type}/_mapping", this);
controller.registerHandler(PUT, "/{index}/_mapping/{type}", this);
controller.registerHandler(PUT, "/_mapping/{type}", this);
controller.registerHandler(POST, "/{index}/_mapping", this); controller.registerHandler(POST, "/{index}/_mapping/", this);
controller.registerHandler(POST, "/{index}/{type}/_mapping", this); controller.registerHandler(POST, "/{index}/{type}/_mapping", this);
controller.registerHandler(POST, "/{index}/_mapping/{type}", this);
controller.registerHandler(POST, "/_mapping/{type}", this);
//register the same paths, but with plural form _mappings
controller.registerHandler(PUT, "/{index}/_mappings/", this);
controller.registerHandler(PUT, "/{index}/{type}/_mappings", this);
controller.registerHandler(PUT, "/{index}/_mappings/{type}", this);
controller.registerHandler(PUT, "/_mappings/{type}", this);
controller.registerHandler(POST, "/{index}/_mappings/", this);
controller.registerHandler(POST, "/{index}/{type}/_mappings", this);
controller.registerHandler(POST, "/{index}/_mappings/{type}", this);
controller.registerHandler(POST, "/_mappings/{type}", this);
} }
@Override @Override

Some files were not shown because too many files have changed in this diff Show More