Merge branch 'master' into ccr
* master:
  Set the new lucene version for 6.4.0
  [ML][TEST] Clean up jobs in ModelPlotIT
  Upgrade to 7.4.0-snapshot-1ed95c097b (#30357)
  Watcher: Ensure trigger service pauses execution (#30363)
  [DOCS] Added coming qualifiers in changelog
  [DOCS] Commented out empty sections in the changelog to fix the doc build. (#30372)
  Security: reduce garbage during index resolution (#30180)
  Make RepositoriesMetaData contents unmodifiable (#30361)
  Change quad tree max levels to 29. Closes #21191 (#29663)
  Test: use trial license in qa tests with security
  [ML] Add integration test for model plots (#30359)
  SQL: Fix bug caused by empty composites (#30343)
  [ML] Account for gaps in data counts after job is reopened (#30294)
  InternalEngineTests.testConcurrentOutOfOrderDocsOnReplica should use two documents (#30121)
  Change signature of Get Repositories Response (#30333)
  Tests: Use different watch ids per test in smoke test (#30331)
  [Docs] Add term query with normalizer example
  Adds Eclipse config for xpack licence headers (#30299)
  Watcher: Make start/stop cycle more predictable and synchronous (#30118)
  [test] add debug logging for packaging test
  [DOCS] Removed X-Pack Breaking Changes
  [DOCS] Fixes link to TLS LDAP info
  Update versions for start_trial after backport (#30218)
  Packaging: Set elasticsearch user to have non-existent homedir (#29007)
  [DOCS] Fixes broken links to bootstrap user (#30349)
  Fix NPE when CumulativeSum agg encounters null/empty bucket (#29641)
  Make licensing FIPS-140 compliant (#30251)
  [DOCS] Reorganizes authentication details in Stack Overview (#30280)
  Network: Remove http.enabled setting (#29601)
  Fix merging logic of Suggester Options (#29514)
  [DOCS] Adds LDAP realm configuration details (#30214)
  [DOCS] Adds native realm configuration details (#30215)
  ReplicationTracker.markAllocationIdAsInSync may hang if allocation is cancelled (#30316)
  [DOCS] Enables edit links for X-Pack pages (#30278)
  Packaging: Unmark systemd service file as a config file (#29004)
  SQL: Reduce number of ranges generated for comparisons (#30267)
  Tests: Simplify VersionUtils released version splitting (#30322)
  Cancelling a peer recovery on the source can leak a primary permit (#30318)
  Added changelog entry for deb prerelease version change (#30184)
  Convert server javadoc to html5 (#30279)
  Create default ES_TMPDIR on Windows (#30325)
  [Docs] Clarify `fuzzy_like_this` redirect (#30183)
  Post backport of #29658.
  Fix docs of the `_ignored` meta field.
  Remove MapperService#types(). (#29617)
  Remove useless version checks in REST tests. (#30165)
  Add a new `_ignored` meta field. (#29658)
  Move repository-azure fixture test to QA project (#30253)

# Conflicts:
#	buildSrc/version.properties
#	server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
commit db14717098

build.gradle
@@ -19,6 +19,7 @@
 import org.apache.tools.ant.taskdefs.condition.Os
+import org.apache.tools.ant.filters.ReplaceTokens
 import org.elasticsearch.gradle.BuildPlugin
 import org.elasticsearch.gradle.LoggedExec
 import org.elasticsearch.gradle.Version
@@ -406,10 +407,19 @@ allprojects {
       }
     }
   }
+
+  File licenseHeaderFile;
+  if (eclipse.project.name.startsWith(':x-pack')) {
+    licenseHeaderFile = new File(project.rootDir, 'buildSrc/src/main/resources/license-headers/elastic-license-header.txt')
+  } else {
+    licenseHeaderFile = new File(project.rootDir, 'buildSrc/src/main/resources/license-headers/oss-license-header.txt')
+  }
+  String licenseHeader = licenseHeaderFile.getText('UTF-8').replace('\n', '\\\\n')
   task copyEclipseSettings(type: Copy) {
     // TODO: "package this up" for external builds
     from new File(project.rootDir, 'buildSrc/src/main/resources/eclipse.settings')
     into '.settings'
+    filter{ it.replaceAll('@@LICENSE_HEADER_TEXT@@', licenseHeader)}
   }
   // otherwise .settings is not nuked entirely
   task wipeEclipseSettings(type: Delete) {
@@ -549,22 +549,6 @@ class BuildPlugin implements Plugin<Project> {
       javadoc.classpath = javadoc.getClasspath().filter { f ->
         return classes.contains(f) == false
       }
-      /*
-       * Force html5 on projects that support it to silence the warning
-       * that `javadoc` will change its defaults in the future.
-       *
-       * But not all of our javadoc is actually valid html5. So we
-       * have to become valid incrementally. We only set html5 on the
-       * projects we have converted so that we still get the annoying
-       * warning on the unconverted ones. That will give us an
-       * incentive to convert them....
-       */
-      List html4Projects = [
-        ':server',
-      ]
-      if (false == html4Projects.contains(project.path)) {
-        javadoc.options.addBooleanOption('html5', true)
-      }
     }
     configureJavadocJar(project)
   }
@@ -64,7 +64,7 @@ class ClusterConfiguration {
     boolean debug = false

     /**
-     * Configuration of the setting <tt>discovery.zen.minimum_master_nodes</tt> on the nodes.
+     * Configuration of the setting {@code discovery.zen.minimum_master_nodes} on the nodes.
      * In case of more than one node, this defaults to the number of nodes
      */
     @Input
File diff suppressed because one or more lines are too long
@@ -0,0 +1,5 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
@@ -0,0 +1,18 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
@@ -1,5 +1,5 @@
 elasticsearch = 7.0.0-alpha1
-lucene = 7.4.0-snapshot-330fd18f20
+lucene = 7.4.0-snapshot-1ed95c097b

 # optional dependencies
 spatial4j = 0.7
@@ -178,7 +178,6 @@ Closure commonPackageConfig(String type, boolean oss) {
   }

   // ========= systemd =========
-  configurationFile '/usr/lib/systemd/system/elasticsearch.service'
   into('/usr/lib/tmpfiles.d') {
     from "${packagingFiles}/systemd/elasticsearch.conf"
   }
@@ -27,6 +27,7 @@ case "$1" in
        adduser --quiet \
                --system \
                --no-create-home \
+               --home /nonexistent \
                --ingroup elasticsearch \
                --disabled-password \
                --shell /bin/false \
@@ -50,8 +51,9 @@ case "$1" in
    # Create elasticsearch user if not existing
    if ! id elasticsearch > /dev/null 2>&1 ; then
        echo -n "Creating elasticsearch user..."
-        useradd -r \
-                -M \
+        useradd --system \
+                --no-create-home \
+                --home-dir /nonexistent \
                --gid elasticsearch \
                --shell /sbin/nologin \
                --comment "elasticsearch user" \
@@ -58,4 +58,7 @@ set ES_DISTRIBUTION_TYPE=${es.distribution.type}

 if not defined ES_TMPDIR (
   set ES_TMPDIR=!TMP!\elasticsearch
+  if not exist "!ES_TMPDIR!" (
+    mkdir "!ES_TMPDIR!"
+  )
 )
@@ -3,6 +3,10 @@

 [partintro]
 --
+// To add a release, copy and paste the template text
+// and add a link to the new section. Note that release subheads must
+// be floated and sections cannot be empty.
+
 // Use these for links to issue and pulls. Note issues and pulls redirect one to
 // each other on Github, so don't worry too much on using the right prefix.
 :issue: https://github.com/elastic/elasticsearch/issues/
@@ -12,13 +16,52 @@ This section summarizes the changes in each release.

* <<release-notes-7.0.0>>
* <<release-notes-6.4.0>>

* <<release-notes-6.3.1>>

--

////
// To add a release, copy and paste the following text, uncomment the relevant
// sections, and add a link to the new section in the list of releases at the
// top of the page. Note that release subheads must be floated and sections
// cannot be empty.
// TEMPLATE:

// [[release-notes-n.n.n]]
// == {es} n.n.n

//[float]
[[breaking-n.n.n]]
//=== Breaking Changes

//[float]
//=== Breaking Java Changes

//[float]
//=== Deprecations

//[float]
//=== New Features

//[float]
//=== Enhancements

//[float]
//=== Bug Fixes

//[float]
//=== Regressions

//[float]
//=== Known Issues

////

[[release-notes-7.0.0]]
== {es} 7.0.0

coming[7.0.0]

[float]
[[breaking-7.0.0]]
=== Breaking Changes
@@ -26,6 +69,7 @@ This section summarizes the changes in each release.
 <<write-thread-pool-fallback, Removed `thread_pool.bulk.*` settings and
 `es.thread_pool.write.use_bulk_as_display_name` system property>> ({pull}29609[#29609])

+<<systemd-service-file-config, Systemd service file is no longer marked as configuration>> ({pull}29004[#29004])
 <<remove-suggest-metric, Removed `suggest` metric on stats APIs>> ({pull}29635[#29635])

 <<remove-field-caps-body, In field capabilities APIs, removed support for providing fields in the request body>> ({pull}30185[#30185])
@@ -34,6 +78,13 @@ Machine Learning::
* The `max_running_jobs` node property is removed in this release. Use the
`xpack.ml.max_open_jobs` setting instead. For more information, see <<ml-settings>>.

* <<remove-http-enabled, Removed `http.enabled` setting>> ({pull}29601[#29601])

//[float]
//=== Breaking Java Changes

[float]
=== Deprecations
Monitoring::
* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1`
to disable monitoring data collection. Use `xpack.monitoring.collection.enabled`
@@ -44,6 +95,93 @@ Security::
mappings, get field mappings, and field capabilities API are now only the
ones that the user is authorized to access in case field level security is enabled.

//[float]
//=== New Features

//[float]
//=== Enhancements

[float]
=== Bug Fixes

Fixed prerelease version of elasticsearch in the `deb` package to sort before GA versions
({pull}29000[#29000])

[float]
=== Regressions
Fail snapshot operations early when creating or deleting a snapshot on a repository that has been
written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140])

Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641])

//[float]
//=== Regressions

//[float]
//=== Known Issues

[[release-notes-6.4.0]]
== {es} 6.4.0

coming[6.4.0]

//[float]
[[breaking-6.4.0]]
//=== Breaking Changes

//[float]
//=== Breaking Java Changes

//[float]
//=== Deprecations

[float]
=== New Features

The new <<mapping-ignored-field,`_ignored`>> field allows to know which fields
got ignored at index time because of the <<ignore-malformed,`ignore_malformed`>>
option. ({pull}29658[#29658])

[float]
=== Enhancements

{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255])

Added new "Request" object flavored request methods. Prefer these instead of the
multi-argument versions. ({pull}29623[#29623])
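As a minimal sketch of the "Request" flavored style this entry describes (illustrative only: the endpoint, the parameter, and the pre-existing low-level `restClient` instance are assumptions, not part of this change):

[source,java]
--------------------------------------------------
// Build the request as an object instead of passing many arguments.
Request request = new Request("GET", "/_cat/health"); // HTTP method + endpoint
request.addParameter("v", "true");                    // query-string parameter
Response response = restClient.performRequest(request);
--------------------------------------------------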
The cluster state listener to decide if watcher should be
stopped/started/paused now runs far less code in an executor but is more
synchronous and predictable. Also the trigger engine thread is only started on
data nodes. And the Execute Watch API can be triggered regardless of whether watcher is
started or stopped. ({pull}30118[#30118])

[float]
=== Bug Fixes

Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216])

Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641])

Machine Learning::

* Account for gaps in data counts after job is reopened ({pull}30294[#30294])

//[float]
//=== Regressions

//[float]
//=== Known Issues

[[release-notes-6.3.1]]
== Elasticsearch version 6.3.1

coming[6.3.1]

//[float]
[[breaking-6.3.1]]
//=== Breaking Changes

//[float]
//=== Breaking Java Changes
@@ -59,35 +197,7 @@ ones that the user is authorized to access in case field level security is enabled.
 [float]
 === Bug Fixes

 Fail snapshot operations early when creating or deleting a snapshot on a repository that has been
 written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140])

-//[float]
-//=== Regressions
-
-//[float]
-//=== Known Issues
-
-[[release-notes-6.4.0]]
-== {es} 6.4.0
-
-//[float]
-//=== New Features
-
-[float]
-=== Enhancements
-
-{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255])
-
-Added new "Request" object flavored request methods. Prefer these instead of the
-multi-argument versions. ({pull}29623[#29623])
-
-[float]
-=== Bug Fixes
-
-Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216])
-
-Reduce the number of object allocations made by {security} when resolving the indices and aliases for a request ({pull}30180[#30180])

 //[float]
 //=== Regressions
@@ -40,6 +40,11 @@ can be customised when a mapping type is created.

 All fields in the document which contain non-null values.

+<<mapping-ignored-field,`_ignored`>>::
+
+All fields in the document that have been ignored at index time because of
+<<ignore-malformed,`ignore_malformed`>>.
+
 [float]
 === Routing meta-field
@@ -57,6 +62,8 @@ can be customised when a mapping type is created.

 include::fields/field-names-field.asciidoc[]

+include::fields/ignored-field.asciidoc[]
+
 include::fields/id-field.asciidoc[]

 include::fields/index-field.asciidoc[]
@@ -0,0 +1,45 @@
+[[mapping-ignored-field]]
+=== `_ignored` field
+
+added[6.4.0]
+
+The `_ignored` field indexes and stores the names of every field in a document
+that has been ignored because it was malformed and
+<<ignore-malformed,`ignore_malformed`>> was turned on.
+
+This field is searchable with <<query-dsl-term-query,`term`>>,
+<<query-dsl-terms-query,`terms`>> and <<query-dsl-exists-query,`exists`>>
+queries, and is returned as part of the search hits.
+
+For instance the below query matches all documents that have one or more fields
+that got ignored:
+
+[source,js]
+--------------------------------------------------
+GET _search
+{
+  "query": {
+    "exists": {
+      "field": "_ignored"
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+Similarly, the below query finds all documents whose `@timestamp` field was
+ignored at index time:
+
+[source,js]
+--------------------------------------------------
+GET _search
+{
+  "query": {
+    "term": {
+      "_ignored": "@timestamp"
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
@@ -85,3 +85,13 @@ PUT my_index

 <1> The `number_one` field inherits the index-level setting.
 <2> The `number_two` field overrides the index-level setting to turn off `ignore_malformed`.
+
+==== Dealing with malformed fields
+
+Malformed fields are silently ignored at indexing time when `ignore_malformed`
+is turned on. Whenever possible it is recommended to keep the number of
+documents that have a malformed field contained, or queries on this field will
+become meaningless. Elasticsearch makes it easy to check how many documents
+have malformed fields by using `exists` or `term` queries on the special
+<<mapping-ignored-field,`_ignored`>> field.
@@ -7,7 +7,8 @@ produces a single token.

 The `normalizer` is applied prior to indexing the keyword, as well as at
 search-time when the `keyword` field is searched via a query parser such as
-the <<query-dsl-match-query,`match`>> query.
+the <<query-dsl-match-query,`match`>> query or via a term level query
+such as the <<query-dsl-term-query,`term`>> query.

 [source,js]
 --------------------------------
@@ -53,6 +54,15 @@ PUT index/_doc/3

 POST index/_refresh

+GET index/_search
+{
+  "query": {
+    "term": {
+      "foo": "BAR"
+    }
+  }
+}
+
 GET index/_search
 {
   "query": {
@@ -64,7 +74,7 @@ GET index/_search
 --------------------------------
 // CONSOLE

-The above query matches documents 1 and 2 since `BÀR` is converted to `bar` at
+The above queries match documents 1 and 2 since `BÀR` is converted to `bar` at
 both index and query time.

 [source,js]
@@ -32,7 +32,7 @@ best tree_levels value to honor this precision. The value should be a
 number followed by an optional distance unit. Valid distance units
 include: `in`, `inch`, `yd`, `yard`, `mi`, `miles`, `km`, `kilometers`,
 `m`,`meters`, `cm`,`centimeters`, `mm`, `millimeters`.
-| `meters`
+| `50m`

 |`tree_levels` |Maximum number of layers to be used by the PrefixTree.
 This can be used to control the precision of shape representations and
@@ -42,7 +42,7 @@ certain level of understanding of the underlying implementation, users
 may use the `precision` parameter instead. However, Elasticsearch only
 uses the tree_levels parameter internally and this is what is returned
 via the mapping API even if you use the precision parameter.
-| `50m`
+| various

 |`strategy` |The strategy parameter defines the approach for how to
 represent shapes at indexing and search time. It also influences the
@@ -119,14 +119,14 @@ Geohashes are base32 encoded strings of the bits of the latitude and
 longitude interleaved. So the longer the hash, the more precise it is.
 Each character added to the geohash represents another tree level and
 adds 5 bits of precision to the geohash. A geohash represents a
-rectangular area and has 32 sub rectangles. The maximum amount of levels
-in Elasticsearch is 24.
+rectangular area and has 32 sub rectangles. The maximum number of levels
+in Elasticsearch is 24; the default is 9.
 * QuadPrefixTree - Uses a
 http://en.wikipedia.org/wiki/Quadtree[quadtree] for grid squares.
 Similar to geohash, quad trees interleave the bits of the latitude and
 longitude the resulting hash is a bit set. A tree level in a quad tree
 represents 2 bits in this bit set, one for each coordinate. The maximum
-amount of levels for the quad trees in Elasticsearch is 50.
+number of levels for the quad trees in Elasticsearch is 29; the default is 21.

 [[spatial-strategy]]
 [float]
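As a concrete illustration of the `precision`/`tree_levels` trade-off described above, a minimal mapping sketch (the index and field names are hypothetical; Elasticsearch resolves `precision` to a `tree_levels` value internally, as this section explains):

[source,js]
--------------------------------------------------
PUT my_shapes
{
  "mappings": {
    "doc": {
      "properties": {
        "location": {
          "type": "geo_shape",
          "tree": "quadtree",
          "precision": "50m"
        }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE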
@@ -28,6 +28,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x.
 * <<breaking_70_indices_changes>>
 * <<breaking_70_mappings_changes>>
 * <<breaking_70_search_changes>>
+* <<breaking_70_packaging_changes>>
 * <<breaking_70_plugins_changes>>
 * <<breaking_70_analysis_changes>>
 * <<breaking_70_api_changes>>
@@ -41,6 +42,7 @@ include::migrate_7_0/cluster.asciidoc[]
 include::migrate_7_0/indices.asciidoc[]
 include::migrate_7_0/mappings.asciidoc[]
 include::migrate_7_0/search.asciidoc[]
+include::migrate_7_0/packaging.asciidoc[]
 include::migrate_7_0/plugins.asciidoc[]
 include::migrate_7_0/api.asciidoc[]
 include::migrate_7_0/java.asciidoc[]
@@ -0,0 +1,10 @@
+[[breaking_70_packaging_changes]]
+=== Packaging changes
+
+[[systemd-service-file-config]]
+==== systemd service file is no longer configuration
+
+The systemd service file `/usr/lib/systemd/system/elasticsearch.service`
+was previously marked as a configuration file in rpm and deb packages.
+Overrides to the systemd elasticsearch service should be made
+in `/etc/systemd/system/elasticsearch.service.d/override.conf`.
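For illustration, such a drop-in override might look like the following (the `LimitMEMLOCK` setting is only an example, not part of this change):

[source,ini]
--------------------------------------------------
# /etc/systemd/system/elasticsearch.service.d/override.conf
[Service]
LimitMEMLOCK=infinity
--------------------------------------------------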
@@ -25,3 +25,10 @@
 the system property `es.thread_pool.write.use_bulk_as_display_name` was
 available to keep the display output in APIs as `bulk` instead of `write`.
 These fallback settings and this system property have been removed.
+
+[[remove-http-enabled]]
+==== Http enabled setting removed
+
+The setting `http.enabled` previously allowed disabling binding to HTTP, only allowing
+use of the transport client. This setting has been removed, as the transport client
+will be removed in the future, thus requiring HTTP to always be enabled.
@@ -110,16 +110,3 @@ client HTTP responses, defaults to unbounded.

 It also uses the common
 <<modules-network,network settings>>.
-
-[float]
-=== Disable HTTP
-
-The http module can be completely disabled and not started by setting
-`http.enabled` to `false`. Elasticsearch nodes (and Java clients) communicate
-internally using the <<modules-transport,transport interface>>, not HTTP. It
-might make sense to disable the `http` layer entirely on nodes which are not
-meant to serve REST requests directly. For instance, you could disable HTTP on
-<<modules-node,data-only nodes>> if you also have
-<<modules-node,client nodes>> which are intended to serve all REST requests.
-Be aware, however, that you will not be able to send any REST requests (eg to
-retrieve node stats) directly to nodes which have HTTP disabled.
@@ -325,6 +325,5 @@ the <<cluster.name,`cluster.name`>>, the <<node.name,`node.name`>> and the
 <<modules-network,network settings>>.

 ifdef::include-xpack[]
-:edit_url!:
 include::{xes-repo-dir}/node.asciidoc[]
 endif::include-xpack[]
@@ -3,7 +3,8 @@

 While the <<full-text-queries,full text queries>> will analyze the query
 string before executing, the _term-level queries_ operate on the exact terms
-that are stored in the inverted index.
+that are stored in the inverted index, and will normalize terms before executing
+only for <<keyword,`keyword`>> fields with <<normalizer,`normalizer`>> property.

 These queries are usually used for structured data like numbers, dates, and
 enums, rather than full text fields. Alternatively, they allow you to craft
@@ -245,7 +245,7 @@ as a query in ``query context'' and as a filter in ``filter context'' (see
 [role="exclude",id="query-dsl-flt-query"]
 === Fuzzy Like This Query

-The `fuzzy_like_this` or `flt` query has been removed. Instead use
+The `fuzzy_like_this`, alternatively known as `flt`, query has been removed. Instead use either
 the <<query-dsl-match-query-fuzziness,`fuzziness`>> parameter with the
 <<query-dsl-match-query,`match` query>> or the <<query-dsl-mlt-query>>.
@@ -66,8 +66,5 @@ include::install/rpm.asciidoc[]
 include::install/windows.asciidoc[]

 ifdef::include-xpack[]
-:edit_url!:
 include::{xes-repo-dir}/setup/docker.asciidoc[]
-
-:edit_url:
 endif::include-xpack[]
@@ -130,6 +130,8 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase {
         filters.put("brazilianstem", BrazilianStemTokenFilterFactory.class);
         filters.put("czechstem", CzechStemTokenFilterFactory.class);
         filters.put("germanstem", GermanStemTokenFilterFactory.class);
+        // this filter is not exposed and should only be used internally
+        filters.put("fixedshingle", Void.class);
         return filters;
     }
@@ -2,9 +2,6 @@
 "Custom normalizer with illegal filter in request":
   # Tests analyze api with normalizer. This is in the analysis-common module
   # because there are no filters that support multiTermAware
-  - skip:
-      version: " - 5.99.99"
-      reason: normalizer support in 6.0.0
   - do:
       catch: bad_request
       indices.analyze:
@@ -1,9 +1,6 @@
 "Synonym filter with char_filter":
   # Tests analyze with synonym and char_filter. This is in the analysis-common module
   # because there are no char filters in core.
-  - skip:
-      version: " - 5.99.99"
-      reason: to support synonym same analysis chain were added in 6.0.0
   - do:
       indices.create:
         index: test_synonym_with_charfilter
@@ -0,0 +1 @@
+63ff4af3504881744695f6239fcb3e9c0e3240b1
@@ -13,10 +13,6 @@ setup:

 ---
 "Parent/child inner hits":
-  - skip:
-      version: " - 5.5.99"
-      reason: parent-join was added in 5.6.
-
   - do:
       index:
         index: test
@@ -59,10 +59,6 @@ setup:

 ---
 "Test basic":
-  - skip:
-      version: " - 5.5.99"
-      reason: parent-join was added in 5.6
-
   - do:
       search:
         body: { sort: ["join_field", "_id"] }
@@ -104,10 +100,6 @@ setup:

 ---
 "Test parent_id query":
-  - skip:
-      version: " - 5.5.99"
-      reason: parent-join was added in 5.6.
-
   - do:
       search:
         body:
@@ -581,11 +581,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
         final List<ParsedDocument> docs = new ArrayList<>();
         final DocumentMapper docMapper;
         final MapperService mapperService = context.getMapperService();
-        Collection<String> types = mapperService.types();
-        if (types.size() != 1) {
-            throw new IllegalStateException("Only a single type should exist, but [" + types.size() + " types exists");
-        }
-        String type = types.iterator().next();
+        String type = mapperService.documentMapper().type();
         if (documentType != null) {
             DEPRECATION_LOGGER.deprecated("[document_type] parameter has been deprecated because types have been deprecated");
             if (documentType.equals(type) == false) {
@@ -75,10 +75,14 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase {
                 ReindexPlugin.class);
     }

+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings() {
         Settings.Builder settings = Settings.builder().put(super.nodeSettings());
-        settings.put(NetworkModule.HTTP_ENABLED.getKey(), true);
         // Whitelist reindexing from the http host we're going to use
         settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "127.0.0.1:*");
         settings.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME);
@@ -91,10 +91,13 @@ public class RetryTests extends ESIntegTestCase {
         return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(nodeSettings()).build();
     }

+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable HTTP so we can test retries on reindex from remote; in this case the "remote" cluster is just this cluster
+    }
+
     final Settings nodeSettings() {
         return Settings.builder()
-            // enable HTTP so we can test retries on reindex from remote; in this case the "remote" cluster is just this cluster
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             // whitelist reindexing from the HTTP host we're going to use
             .put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "127.0.0.1:*")
             .build();
@@ -8,10 +8,6 @@
 ---
 "no query fails":

-  - skip:
-      version: " - 5.99.99"
-      reason: explicit query is required since 6.0.0
-
   - do:
       catch: /query is missing/
       delete_by_query:
@@ -50,11 +50,15 @@ public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase {

     private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB);

+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), LIMIT)
             .build();
     }
@@ -38,11 +38,15 @@ import static org.hamcrest.Matchers.hasSize;
 @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
 public class Netty4PipeliningDisabledIT extends ESNetty4IntegTestCase {

+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put("http.pipelining", false)
             .build();
     }
@@ -37,11 +37,15 @@ import static org.hamcrest.Matchers.is;
 @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
 public class Netty4PipeliningEnabledIT extends ESNetty4IntegTestCase {

+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put("http.pipelining", true)
             .build();
     }
@@ -0,0 +1 @@
+5f3c053ef858c58c74a687a40f5451d19b69850b

@@ -0,0 +1 @@
+a6e72085f7c2ade43ec0e5f52c227e6f715666ad

@@ -0,0 +1 @@
+25c93466d0a2c41df0cf98de77d632f3f02fa98d

@@ -0,0 +1 @@
+4688aaa48607ac26f6bf2567052019ab3fb2ff5e

@@ -0,0 +1 @@
+ad71de632c9363c3f200cd5a240686256c7db431

@@ -0,0 +1 @@
+96a630a7c4916358f129f6bac8718108811efe1a
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-import org.elasticsearch.gradle.test.AntFixture

 esplugin {
   description 'The Azure Repository plugin adds support for Azure storage repositories.'
@@ -43,28 +42,12 @@ thirdPartyAudit.excludes = [
   'org.slf4j.LoggerFactory',
 ]

-forbiddenApisTest {
-  // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
-  bundledSignatures -= 'jdk-non-portable'
-  bundledSignatures += 'jdk-internal'
-}
-
-/** A task to start the fixture which emulates an Azure Storage service **/
-task azureStorageFixture(type: AntFixture) {
-  dependsOn compileTestJava
-  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
-  executable = new File(project.runtimeJavaHome, 'bin/java')
-  args 'org.elasticsearch.repositories.azure.AzureStorageFixture', baseDir, 'container_test'
+check {
+  // also execute the QA tests when testing the plugin
+  dependsOn 'qa:microsoft-azure-storage:check'
 }

 integTestCluster {
-  dependsOn azureStorageFixture
-
-  keystoreSetting 'azure.client.integration_test.account', "azure_integration_test_account"
-  /* The key is "azure_integration_test_key" encoded using base64 */
-  keystoreSetting 'azure.client.integration_test.key', "YXp1cmVfaW50ZWdyYXRpb25fdGVzdF9rZXk="
-  // Use a closure on the string to delay evaluation until tests are executed. The endpoint_suffix is used
-  // in a hacky way to change the protocol and endpoint. We must fix that.
-  setting 'azure.client.integration_test.endpoint_suffix',
-          "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${ -> azureStorageFixture.addressAndPort }"
-}
+  keystoreSetting 'azure.client.integration_test.account', 'azure_account'
+  keystoreSetting 'azure.client.integration_test.key', 'azure_key'
 }
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.MavenFilteringHack
+import org.elasticsearch.gradle.test.AntFixture
+
+apply plugin: 'elasticsearch.standalone-rest-test'
+apply plugin: 'elasticsearch.rest-test'
+
+dependencies {
+  testCompile project(path: ':plugins:repository-azure', configuration: 'runtime')
+}
+
+integTestCluster {
+  plugin ':plugins:repository-azure'
+}
+
+forbiddenApisTest {
+  // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
+  bundledSignatures -= 'jdk-non-portable'
+  bundledSignatures += 'jdk-internal'
+}
+
+boolean useFixture = false
+
+String azureAccount = System.getenv("azure_storage_account")
+String azureKey = System.getenv("azure_storage_key")
+String azureContainer = System.getenv("azure_storage_container")
+String azureBasePath = System.getenv("azure_storage_base_path")
+
+if (!azureAccount && !azureKey && !azureContainer && !azureBasePath) {
+  azureAccount = 'azure_integration_test_account'
+  azureKey = 'YXp1cmVfaW50ZWdyYXRpb25fdGVzdF9rZXk=' // The key is "azure_integration_test_key" encoded using base64
+  azureContainer = 'container_test'
+  azureBasePath = 'integration_test'
+  useFixture = true
+}
+
+/** A task to start the fixture which emulates an Azure Storage service **/
+task azureStorageFixture(type: AntFixture) {
+  dependsOn compileTestJava
+  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
+  executable = new File(project.runtimeJavaHome, 'bin/java')
+  args 'org.elasticsearch.repositories.azure.AzureStorageFixture', baseDir, azureContainer
+}
+
+Map<String, Object> expansions = [
+  'container': azureContainer,
+  'base_path': azureBasePath
+]
+processTestResources {
+  inputs.properties(expansions)
+  MavenFilteringHack.filter(it, expansions)
+}
+
+integTestCluster {
+  keystoreSetting 'azure.client.integration_test.account', azureAccount
+  keystoreSetting 'azure.client.integration_test.key', azureKey
+
+  if (useFixture) {
+    dependsOn azureStorageFixture
+    // Use a closure on the string to delay evaluation until tests are executed. The endpoint_suffix is used
+    // in a hacky way to change the protocol and endpoint. We must fix that.
+    setting 'azure.client.integration_test.endpoint_suffix',
+            "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${ -> azureStorageFixture.addressAndPort }"
+  } else {
+    println "Using an external service to test the repository-azure plugin"
+  }
+}
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.azure;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+
+public class AzureStorageRepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
+
+    public AzureStorageRepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws Exception {
+        return ESClientYamlSuiteTestCase.createParameters();
+    }
+
+    @Override
+    protected Settings restClientSettings() {
+        // Give more time to repository-azure to complete the snapshot operations
+        return Settings.builder().put(super.restClientSettings())
+            .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "60s")
+            .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "60s")
+            .build();
+    }
+}
@@ -0,0 +1,174 @@
+# Integration tests for repository-azure
+---
+"Snapshot/Restore with repository-azure":
+
+  # Register repository
+  - do:
+      snapshot.create_repository:
+        repository: repository
+        body:
+          type: azure
+          settings:
+            container: ${container}
+            client: "integration_test"
+            base_path: ${base_path}
+
+  - match: { acknowledged: true }
+
+  # Get repository
+  - do:
+      snapshot.get_repository:
+        repository: repository
+
+  - match: { repository.settings.container: ${container} }
+  - match: { repository.settings.client : "integration_test" }
+  - match: { repository.settings.base_path : ${base_path} }
+
+  # Index documents
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: docs
+              _type: doc
+              _id: 1
+          - snapshot: one
+          - index:
+              _index: docs
+              _type: doc
+              _id: 2
+          - snapshot: one
+          - index:
+              _index: docs
+              _type: doc
+              _id: 3
+          - snapshot: one
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 3}
+
+  # Create a first snapshot
+  - do:
+      snapshot.create:
+        repository: repository
+        snapshot: snapshot-one
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: snapshot-one }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.include_global_state: true }
+  - match: { snapshot.shards.failed : 0 }
+
+  - do:
+      snapshot.status:
+        repository: repository
+        snapshot: snapshot-one
+
+  - is_true: snapshots
+  - match: { snapshots.0.snapshot: snapshot-one }
+  - match: { snapshots.0.state : SUCCESS }
+
+  # Index more documents
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: docs
+              _type: doc
+              _id: 4
+          - snapshot: two
+          - index:
+              _index: docs
+              _type: doc
+              _id: 5
+          - snapshot: two
+          - index:
+              _index: docs
+              _type: doc
+              _id: 6
+          - snapshot: two
+          - index:
+              _index: docs
+              _type: doc
+              _id: 7
+          - snapshot: two
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 7}
+
+  # Create a second snapshot
+  - do:
+      snapshot.create:
+        repository: repository
+        snapshot: snapshot-two
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: snapshot-two }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.shards.failed : 0 }
+
+  - do:
+      snapshot.get:
+        repository: repository
+        snapshot: snapshot-one,snapshot-two
+
+  - is_true: snapshots
+  - match: { snapshots.0.state : SUCCESS }
+  - match: { snapshots.1.state : SUCCESS }
+
+  # Delete the index
+  - do:
+      indices.delete:
+        index: docs
+
+  # Restore the second snapshot
+  - do:
+      snapshot.restore:
+        repository: repository
+        snapshot: snapshot-two
+        wait_for_completion: true
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 7}
+
+  # Delete the index again
+  - do:
+      indices.delete:
+        index: docs
+
+  # Restore the first snapshot
+  - do:
+      snapshot.restore:
+        repository: repository
+        snapshot: snapshot-one
+        wait_for_completion: true
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 3}
+
+  # Remove the snapshots
+  - do:
+      snapshot.delete:
+        repository: repository
+        snapshot: snapshot-two
+        master_timeout: 5m
+
+  - do:
+      snapshot.delete:
+        repository: repository
+        snapshot: snapshot-one
+        master_timeout: 5m
@@ -11,177 +11,3 @@
       nodes.info: {}

   - match: { nodes.$master.plugins.0.name: repository-azure }
----
-"Snapshot/Restore with repository-azure":
-
-  # Register repository
-  - do:
-      snapshot.create_repository:
-        repository: repository
-        body:
-          type: azure
-          settings:
-            container: "container_test"
-            client: "integration_test"
-
-  - match: { acknowledged: true }
-
-  # Get repository
-  - do:
-      snapshot.get_repository:
-        repository: repository
-
-  - match: {repository.settings.container : "container_test"}
-  - match: {repository.settings.client : "integration_test"}
-
-  # Index documents
-  - do:
-      bulk:
-        refresh: true
-        body:
-          - index:
-              _index: docs
-              _type: doc
-              _id: 1
-          - snapshot: one
-          - index:
-              _index: docs
-              _type: doc
-              _id: 2
-          - snapshot: one
-          - index:
-              _index: docs
-              _type: doc
-              _id: 3
-          - snapshot: one
-
-  - do:
-      count:
-        index: docs
-
-  - match: {count: 3}
-
-  # Create a first snapshot
-  - do:
-      snapshot.create:
-        repository: repository
-        snapshot: snapshot-one
-        wait_for_completion: true
-
-  - match: { snapshot.snapshot: snapshot-one }
-  - match: { snapshot.state : SUCCESS }
-  - match: { snapshot.include_global_state: true }
-  - match: { snapshot.shards.failed : 0 }
-
-  - do:
-      snapshot.status:
-        repository: repository
-        snapshot: snapshot-one
-
-  - is_true: snapshots
-  - match: { snapshots.0.snapshot: snapshot-one }
-  - match: { snapshots.0.state : SUCCESS }
-
-  # Index more documents
-  - do:
-      bulk:
-        refresh: true
-        body:
-          - index:
-              _index: docs
-              _type: doc
-              _id: 4
-          - snapshot: two
-          - index:
-              _index: docs
-              _type: doc
-              _id: 5
-          - snapshot: two
-          - index:
-              _index: docs
-              _type: doc
-              _id: 6
-          - snapshot: two
-          - index:
-              _index: docs
-              _type: doc
-              _id: 7
-          - snapshot: two
-
-  - do:
-      count:
-        index: docs
-
-  - match: {count: 7}
-
-  # Create a second snapshot
-  - do:
-      snapshot.create:
-        repository: repository
-        snapshot: snapshot-two
-        wait_for_completion: true
-
-  - match: { snapshot.snapshot: snapshot-two }
-  - match: { snapshot.state : SUCCESS }
-  - match: { snapshot.shards.failed : 0 }
-
-  - do:
-      snapshot.get:
-        repository: repository
-        snapshot: snapshot-one,snapshot-two
-
-  - is_true: snapshots
-  - match: { snapshots.0.state : SUCCESS }
-  - match: { snapshots.1.state : SUCCESS }
-
-  # Delete the index
-  - do:
-      indices.delete:
-        index: docs
-
-  # Restore the second snapshot
-  - do:
-      snapshot.restore:
-        repository: repository
-        snapshot: snapshot-two
-        wait_for_completion: true
-
-  - do:
-      count:
-        index: docs
-
-  - match: {count: 7}
-
-  # Delete the index again
-  - do:
-      indices.delete:
-        index: docs
-
-  # Restore the first snapshot
-  - do:
-      snapshot.restore:
-        repository: repository
-        snapshot: snapshot-one
-        wait_for_completion: true
-
-  - do:
-      count:
-        index: docs
-
-  - match: {count: 3}
-
-  # Remove the snapshots
-  - do:
-      snapshot.delete:
-        repository: repository
-        snapshot: snapshot-two
-
-  - do:
-      snapshot.delete:
-        repository: repository
-        snapshot: snapshot-one
-
-  # Remove our repository
-  - do:
-      snapshot.delete_repository:
-        repository: repository
@@ -1,82 +0,0 @@
-"Deprecated Repository can be registered":
-  - skip:
-      features: warnings
-  - do:
-      warnings:
-        - "[account] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version."
-      snapshot.create_repository:
-        repository: test_repo_azure
-        verify: false
-        body:
-          type: azure
-          settings:
-            account   : "my_test_account"
-            container : "backup-container"
-            base_path : "backups"
-            chunk_size: "32m"
-            compress  : true
-
-  - is_true: acknowledged
-
-  - do:
-      snapshot.get_repository:
-        repository: test_repo_azure
-
-  - is_true : test_repo_azure
-  - match : { test_repo_azure.settings.account   : "my_test_account" }
-  - match : { test_repo_azure.settings.container : "backup-container" }
-  - match : { test_repo_azure.settings.base_path : "backups" }
-  - match : { test_repo_azure.settings.chunk_size: "32m" }
-  - match : { test_repo_azure.settings.compress  : "true" }
----
-"Default repository can be registered":
-  - do:
-      snapshot.create_repository:
-        repository: test_repo_azure
-        verify: false
-        body:
-          type: azure
-          settings:
-            container : "backup-container"
-            base_path : "backups"
-            chunk_size: "32m"
-            compress  : true
-
-  - is_true: acknowledged
-
-  - do:
-      snapshot.get_repository:
-        repository: test_repo_azure
-
-  - is_true : test_repo_azure
-  - match : { test_repo_azure.settings.container : "backup-container" }
-  - match : { test_repo_azure.settings.base_path : "backups" }
-  - match : { test_repo_azure.settings.chunk_size: "32m" }
-  - match : { test_repo_azure.settings.compress  : "true" }
----
-"Named client repository can be registered":
-  - do:
-      snapshot.create_repository:
-        repository: test_repo_azure
-        verify: false
-        body:
-          type: azure
-          settings:
-            client    : "secondary"
-            container : "backup-container"
-            base_path : "backups"
-            chunk_size: "32m"
-            compress  : true
-
-  - is_true: acknowledged
-
-  - do:
-      snapshot.get_repository:
-        repository: test_repo_azure
-
-  - is_true : test_repo_azure
-  - match : { test_repo_azure.settings.client    : "secondary" }
-  - match : { test_repo_azure.settings.container : "backup-container" }
-  - match : { test_repo_azure.settings.base_path : "backups" }
-  - match : { test_repo_azure.settings.chunk_size: "32m" }
-  - match : { test_repo_azure.settings.compress  : "true" }
@@ -1,9 +1,5 @@
 ---
 "Get simple field caps from remote cluster":
-  - skip:
-      version: " - 5.4.99"
-      reason: this uses a new API functionality that has been added in 5.5.0
-
   - do:
       indices.create:
         index: field_caps_index_2
@@ -90,7 +90,6 @@ public class ContextAndHeaderTransportIT extends HttpSmokeTestCase {
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .build();
     }
@@ -49,7 +49,6 @@ public class CorsRegexIT extends HttpSmokeTestCase {
             .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true)
             .put(SETTING_CORS_ALLOW_METHODS.getKey(), "get, options, post")
             .put(SETTING_CORS_ENABLED.getKey(), true)
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .build();
     }
@@ -57,11 +57,15 @@ import static org.hamcrest.Matchers.hasSize;
  */
 public class DeprecationHttpIT extends HttpSmokeTestCase {

+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put("force.http.enabled", true)
             // change values of deprecated settings so that accessing them is logged
             .put(TEST_DEPRECATED_SETTING_TRUE1.getKey(), ! TEST_DEPRECATED_SETTING_TRUE1.getDefault(Settings.EMPTY))
             .put(TEST_DEPRECATED_SETTING_TRUE2.getKey(), ! TEST_DEPRECATED_SETTING_TRUE2.getDefault(Settings.EMPTY))
@@ -38,12 +38,12 @@ import static org.hamcrest.Matchers.is;
  */
 @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
 public class DetailedErrorsDisabledIT extends HttpSmokeTestCase {

-    // Build our cluster settings
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put(HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED.getKey(), false)
             .build();
     }
@@ -55,13 +55,17 @@ public abstract class HttpSmokeTestCase extends ESIntegTestCase {
         }
     }

+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
             .put(NetworkModule.TRANSPORT_TYPE_KEY, nodeTransportTypeKey)
-            .put(NetworkModule.HTTP_TYPE_KEY, nodeHttpTypeKey)
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true).build();
+            .put(NetworkModule.HTTP_TYPE_KEY, nodeHttpTypeKey).build();
     }

     @Override
@@ -37,12 +37,10 @@ import static org.hamcrest.Matchers.equalTo;
  */
 @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1)
 public class ResponseHeaderPluginIT extends HttpSmokeTestCase {

     @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            .put("force.http.enabled", true)
-            .build();
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
     }

     @Override
@@ -131,9 +131,13 @@ setup() {
     # The removal must disable the service
     # see prerm file
     if is_systemd; then
-        # Debian systemd distros usually returns exit code 3
+        missing_exit_code=4
+        if [ $(systemctl --version | head -1 | awk '{print $2}') -lt 231 ]; then
+            # systemd before version 231 used exit code 3 when the service did not exist
+            missing_exit_code=3
+        fi
         run systemctl status elasticsearch.service
-        [ "$status" -eq 3 ]
+        [ "$status" -eq $missing_exit_code ]

         run systemctl is-enabled elasticsearch.service
         [ "$status" -eq 1 ]
@@ -166,7 +170,6 @@ setup() {

     # The service files are still here
     assert_file_exist "/etc/init.d/elasticsearch"
-    assert_file_exist "/usr/lib/systemd/system/elasticsearch.service"
 }

 @test "[DEB] purge package" {
@@ -88,6 +88,8 @@ verify_package_installation() {
     id elasticsearch

     getent group elasticsearch
+    # homedir is set in /etc/passwd but to a non existent directory
+    assert_file_not_exist $(getent passwd elasticsearch | cut -d: -f6)

     assert_file "$ESHOME" d root root 755
     assert_file "$ESHOME/bin" d root root 755
@@ -25,9 +25,6 @@

 ---
 "Empty _id":
-  - skip:
-      version: " - 5.3.0"
-      reason: empty IDs were not rejected until 5.3.1
   - do:
       bulk:
         refresh: true
@@ -62,11 +59,6 @@
 ---
 "empty action":

-  - skip:
-      version: " - 5.4.99"
-      reason: confusing exception messaged caused by empty object fixed in 5.5.0
-      features: ["headers"]
-
   - do:
       catch: /Malformed action\/metadata line \[3\], expected FIELD_NAME but found \[END_OBJECT\]/
       headers:
|
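
One wrinkle worth noting: a `skip` clause can gate on runner capabilities as well as versions, as the `features: ["headers"]` line above shows. This hunk drops the whole block, but in a test that still depends on a runner capability only the `version`/`reason` pair can go. A sketch of the feature-only form that remains valid (assuming the runner must still advertise the `headers` feature):

      - skip:
          features: ["headers"]   # capability gate only, no version range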
@@ -129,10 +129,6 @@
 ---
 "Multiple alias names":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: multiple aliases are supported only from 6.0.0 on

   - do:
       indices.create:
         index: test

@@ -265,10 +261,6 @@
 ---
 "Alias sorting":
-
-  - skip:
-      version: " - 5.0.99"
-      reason: sorting was introduced in 5.1.0

   - do:
       indices.create:
         index: test_index

@@ -160,9 +160,6 @@

 ---
 "Test cat indices sort":
-  - skip:
-      version: " - 5.0.99"
-      reason: sorting was introduced in 5.1.0

   - do:
       indices.create:

@@ -60,10 +60,6 @@

 ---
 "Additional disk information":
-  - skip:
-      version: " - 5.5.99"
-      reason: additional disk info added in 5.6.0
-
   - do:
       cat.nodes:
         h: diskAvail,diskTotal,diskUsed,diskUsedPercent

@@ -92,10 +88,6 @@

 ---
 "Test cat nodes output with full_id set":
-  - skip:
-      version: " - 5.0.0"
-      reason: The full_id setting was rejected in 5.0.0 see #21266
-

   - do:
       cat.nodes:

@@ -46,9 +46,6 @@

 ---
 "Test cat repositories sort":
-  - skip:
-      version: " - 5.0.99"
-      reason: sorting was introduced in 5.1.0
   - do:
       snapshot.create_repository:
         repository: test_cat_repo_1

@@ -86,10 +86,6 @@
 ---
 "Test cat segments on closed index behaviour":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: status code on closed indices changed in 6.0.0 from 403 to 400

   - do:
       indices.create:
         index: index1

@@ -1,8 +1,5 @@
 ---
 "Help":
-  - skip:
-      version: " - 5.99.99"
-      reason: seq no stats were added in 6.0.0

   - do:
       cat.shards:

@@ -219,10 +216,6 @@

 ---
 "Test cat shards sort":
-  - skip:
-      version: " - 5.0.99"
-      reason: sorting was introduced in 5.1.0
-
   - do:
       indices.create:
         index: foo

@@ -1,8 +1,5 @@
 ---
 "Help":
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0
   - do:
       cat.templates:
         help: true

@@ -17,9 +14,6 @@

 ---
 "No templates":
-  - skip:
-      version: " - 5.0.99"
-      reason: templates were introduced in 5.1.0
   - do:
       cat.templates: {}

@@ -31,10 +25,6 @@
 ---
 "Normal templates":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.put_template:
         name: test

@@ -83,10 +73,6 @@
 ---
 "Filtered templates":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.put_template:
         name: test

@@ -125,9 +111,6 @@

 ---
 "Column headers":
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0
   - do:
       indices.put_template:
         name: test

@@ -161,9 +144,6 @@

 ---
 "Select columns":
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0
   - do:
       indices.put_template:
         name: test

@@ -194,9 +174,6 @@

 ---
 "Sort templates":
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0
   - do:
       indices.put_template:
         name: test

@@ -245,9 +222,6 @@

 ---
 "Multiple template":
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0
   - do:
       indices.put_template:
         name: test_1
@@ -1,8 +1,4 @@
 "bad cluster shard allocation explanation request":
-  - skip:
-      version: " - 5.5.99"
-      reason: response status on bad request was changed starting in 5.6.0
-
   - do:
       # there aren't any unassigned shards to explain
       catch: /illegal_argument_exception/

@@ -10,10 +6,6 @@

 ---
 "cluster shard allocation explanation test":
-  - skip:
-      version: " - 5.1.99"
-      reason: explain API response output was changed starting in 5.2.0
-
   - do:
       indices.create:
         index: test

@@ -40,10 +32,6 @@

 ---
 "cluster shard allocation explanation test with empty request":
-  - skip:
-      version: " - 5.1.99"
-      reason: explain API response output was changed starting in 5.2.0
-
   - do:
       indices.create:
         index: test

@@ -64,11 +64,6 @@
 ---
 "Test get a default settings":

-  # this can't be bumped to 5.0.2 until snapshots are published
-  - skip:
-      version: " - 5.0.3"
-      reason: Fetching default group setting was buggy until 5.0.3
-
   - do:
       cluster.get_settings:
         include_defaults: true

@@ -1,8 +1,5 @@
 ---
 "Get an empty remote info":
-  - skip:
-      version: " - 5.3.99"
-      reason: this API doesn't exist in 5.3.x yet
   - do:
       cluster.remote_info: {}
   - is_true: ''

@@ -7,10 +7,6 @@

 ---
 "get cluster state returns cluster state size with human readable format":
-  - skip:
-      version: " - 5.99.99"
-      reason: "cluster state size is only available in v6.0.0 and higher"
-
   - do:
       cluster.state:
         human: true

@@ -21,10 +21,6 @@
 ---
 "Internal versioning with explicit version":
-
-  - skip:
-      version: " - 5.1.1"
-      reason: validation logic only fixed from 5.1.2 onwards

   - do:
       catch: bad_request
       create:

@@ -1,10 +1,6 @@
 ---
 "External version":
-
-  - skip:
-      version: " - 5.1.1"
-      reason: validation logic only fixed from 5.1.2 onwards

   - do:
       catch: bad_request
       create:
@@ -75,9 +75,6 @@ setup:

 ---
 "Get simple field caps":
-  - skip:
-      version: " - 5.3.99"
-      reason: this uses a new API that has been added in 5.4.0

   - do:
       field_caps:

@@ -116,9 +113,6 @@ setup:
   - is_false: fields.geo.keyword.on_aggregatable_indices
 ---
 "Get nested field caps":
-  - skip:
-      version: " - 5.3.99"
-      reason: this uses a new API that has been added in 5.4.0

   - do:
       field_caps:

@@ -147,9 +141,6 @@ setup:
   - is_false: fields.object\.nested2.keyword.non_searchable_indices
 ---
 "Get prefix field caps":
-  - skip:
-      version: " - 5.3.99"
-      reason: this uses a new API that has been added in 5.4.0

   - do:
       field_caps:

@@ -168,9 +159,6 @@ setup:

 ---
 "Mix in non-existing field field caps":
-  - skip:
-      version: " - 5.4.0"
-      reason: "#24504 fixed a bug in this API in 5.4.1"

   - do:
       field_caps:

@@ -58,9 +58,6 @@

 ---
 "Custom filter in request":
-  - skip:
-      version: " - 5.99.99"
-      reason: token filter name changed in 6.0, so this needs to be skipped on mixed clusters
   - do:
       indices.analyze:
         body:

@@ -81,9 +78,6 @@

 ---
 "Synonym filter with tokenizer":
-  - skip:
-      version: " - 5.99.99"
-      reason: to support synonym same analysis chain were added in 6.0.0
   - do:
       indices.create:
         index: test_synonym

@@ -114,9 +108,6 @@

 ---
 "Custom normalizer in request":
-  - skip:
-      version: " - 5.99.99"
-      reason: normalizer support in 6.0.0
   - do:
       indices.analyze:
         body:

@@ -5,10 +5,6 @@

 ---
 "clear_cache with request set to false":
-  - skip:
-      version: " - 5.3.99"
-      reason: this name was added in 5.4
-
   - do:
       indices.clear_cache:
         request: false

@@ -32,9 +32,6 @@

 ---
 "Create index":
-  - skip:
-      version: " - 5.5.99"
-      reason: create index response contains index name since 5.6.0

   - do:
       indices.create:

@@ -10,9 +10,6 @@ setup:
       index: index2
 ---
 "Delete index against alias":
-  - skip:
-      version: " - 5.99.0"
-      reason: delete index doesn't support aliases only from 6.0.0 on
   - do:
       catch: bad_request
       indices.delete:

@@ -24,9 +21,6 @@ setup:
   - is_true: index2
 ---
 "Delete index against alias - ignore unavailable":
-  - skip:
-      version: " - 5.99.0"
-      reason: delete index doesn't support aliases only from 6.0.0 on
   - do:
       indices.delete:
         index: alias

@@ -38,9 +32,6 @@ setup:
   - is_true: index2
 ---
 "Delete index against alias - multiple indices":
-  - skip:
-      version: " - 5.99.0"
-      reason: delete index doesn't support aliases only from 6.0.0 on
   - do:
       catch: bad_request
       indices.delete:

@@ -52,9 +43,6 @@ setup:
   - is_true: index2
 ---
 "Delete index against alias - ignore unavailable - multiple indices":
-  - skip:
-      version: " - 5.99.0"
-      reason: delete index doesn't support aliases only from 6.0.0 on
   - do:
       indices.delete:
         index: alias,index2

@@ -67,9 +55,6 @@ setup:
   - is_false: index2
 ---
 "Delete index against wildcard matching alias":
-  - skip:
-      version: " - 5.99.0"
-      reason: delete index doesn't support aliases only from 6.0.0 on
   - do:
       indices.delete:
         index: alia*

@@ -80,9 +65,6 @@ setup:
   - is_true: index2
 ---
 "Delete index against wildcard matching alias - disallow no indices":
-  - skip:
-      version: " - 5.99.0"
-      reason: delete index doesn't support aliases only from 6.0.0 on
   - do:
       catch: missing
       indices.delete:

@@ -95,9 +77,6 @@ setup:
   - is_true: index2
 ---
 "Delete index against wildcard matching alias - multiple indices":
-  - skip:
-      version: " - 5.99.0"
-      reason: delete index doesn't support aliases only from 6.0.0 on
   - do:
       indices.delete:
         index: alia*,index2

@@ -109,9 +88,6 @@ setup:
   - is_false: index2
 ---
 "Delete index against wildcard matching alias - disallow no indices - multiple indices":
-  - skip:
-      version: " - 5.99.0"
-      reason: delete index doesn't support aliases only from 6.0.0 on
   - do:
       catch: missing
       indices.delete:
@@ -1,9 +1,5 @@
 ---
 "Basic test for delete alias":
-  - skip:
-      version: " - 5.4.99"
-      reason: Previous versions did not 404 on missing aliases
-
   - do:
       indices.create:
         index: testind

@@ -84,10 +84,6 @@ setup:

 ---
 "check delete with index list":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.delete_alias:
         index: "test_index1,test_index2"

@@ -110,10 +106,6 @@ setup:

 ---
 "check delete with prefix* index":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.delete_alias:
         index: "test_*"

@@ -137,10 +129,6 @@ setup:

 ---
 "check delete with index list and * aliases":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.delete_alias:
         index: "test_index1,test_index2"

@@ -164,10 +152,6 @@ setup:

 ---
 "check delete with index list and _all aliases":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.delete_alias:
         index: "test_index1,test_index2"

@@ -191,10 +175,6 @@ setup:

 ---
 "check delete with index list and wildcard aliases":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.delete_alias:
         index: "test_index1,test_index2"

@@ -7,10 +7,6 @@ setup:
 ---
 "Test indices.exists_template":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.exists_template:
         name: test
@@ -1,17 +1,11 @@
 ---
 "Exists type":
-  - skip:
-      # this will only run in a mixed cluster environment with at least 1 5.x node
-      version: "5.99.99 - "
-      reason: multiple types are not supported on 6.x indices onwards
-
   - do:
       indices.create:
         index: test_1
         body:
           mappings:
             type_1: {}
-            type_2: {}

   - do:
       indices.exists_type:
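
The hunk above runs the other way from the rest: `version: "5.99.99 - "` is open-ended on the right, so the test was skipped on every 6.x cluster and only ever ran in a mixed cluster that still contained a 5.x node, exactly as its comment says. Dropping that gate means the test body itself has to be valid on 6.x, which would explain why the second mapping type disappears in the same hunk: a 6.x index allows only one type. The two range directions side by side, for reference (version numbers illustrative):

      - skip:
          version: " - 5.99.99"   # skip on 5.x and older; run on 6.0.0+
          reason: feature added in 6.0.0
      - skip:
          version: "5.99.99 - "   # skip on 6.0.0+; run only against older clusters
          reason: behaviour removed in 6.0.0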
@@ -137,10 +137,6 @@ setup:
 ---
 "Should return test_index_3 if expand_wildcards=closed":
-
-  - skip:
-      version: " - 2.0.0"
-      reason: Requires fix for issue 7258

   - do:
       indices.get:
         index: test_index_*

@@ -162,9 +158,6 @@ setup:

 ---
 "Should return an exception when querying invalid indices":
-  - skip:
-      version: " - 5.99.99"
-      reason: "bad request logic added in 6.0.0"

   - do:
       catch: bad_request

@@ -42,10 +42,6 @@ setup:

 ---
 "Get aliases via /_all/_alias/":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.create:
         index: myindex

@@ -62,10 +58,6 @@ setup:

 ---
 "Get aliases via /*/_alias/":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.create:
         index: myindex

@@ -82,10 +74,6 @@ setup:

 ---
 "Get and index with no aliases via /{index}/_alias/":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.create:
         index: myindex

@@ -222,10 +210,6 @@ setup:

 ---
 "Non-existent alias on an existing index returns 404":
-  - skip:
-      version: " - 5.4.99"
-      reason: Previous versions did not 404 on missing aliases
-
   - do:
       catch: missing
       indices.get_alias:

@@ -237,10 +221,6 @@ setup:

 ---
 "Existent and non-existent alias returns 404 and the existing alias":
-  - skip:
-      version: " - 5.4.99"
-      reason: Previous versions did not 404 on missing aliases
-
   - do:
       catch: missing
       indices.get_alias:

@@ -253,10 +233,6 @@ setup:

 ---
 "Existent and non-existent aliases returns 404 and the existing alias":
-  - skip:
-      version: " - 5.4.99"
-      reason: Previous versions did not 404 on missing aliases
-
   - do:
       catch: missing
       indices.get_alias:

@@ -1,8 +1,5 @@
 ---
 "Non-existent type returns 404":
-  - skip:
-      version: " - 5.5.99"
-      reason: Previous versions did not 404 on missing types
   - do:
       indices.create:
         index: test_index

@@ -25,9 +22,6 @@

 ---
 "No type matching pattern returns 404":
-  - skip:
-      version: " - 5.5.99"
-      reason: Previous versions did not 404 on missing types
   - do:
       indices.create:
         index: test_index

@@ -51,9 +45,6 @@

 ---
 "Existent and non-existent type returns 404 and the existing type":
-  - skip:
-      version: " - 5.5.99"
-      reason: Previous versions did not 404 on missing types
   - do:
       indices.create:
         index: test_index

@@ -77,9 +68,6 @@

 ---
 "Existent and non-existent types returns 404 and the existing type":
-  - skip:
-      version: " - 5.5.99"
-      reason: Previous versions did not 404 on missing types
   - do:
       indices.create:
         index: test_index

@@ -103,9 +91,6 @@

 ---
 "Type missing when no types exist":
-  - skip:
-      version: " - 5.0.2"
-      reason: there was a bug prior to 5.0.2
   - do:
       catch: missing
       indices.get_mapping:

@@ -94,10 +94,6 @@ setup:

 ---
 "Get test-* with wildcard_expansion=none":
-  - skip:
-      version: " - 5.99.99"
-      reason: this was a breaking change in 6.0
-
   - do:
       catch: missing
       indices.get_mapping:
@@ -1,166 +0,0 @@
----
-setup:
-
-  - skip:
-      # this will only run in a mixed cluster environment with at least 1 5.x node
-      version: "5.99.99 - "
-      reason: multiple types are not supported on 6.x indices onwards
-
-  - do:
-      indices.create:
-        index: test_1
-        body:
-          mappings:
-            type_1: {}
-            type_2: {}
-  - do:
-      indices.create:
-        index: test_2
-        body:
-          mappings:
-            type_2: {}
-            type_3: {}
-
----
-"Get /_mapping":
-
-  - do:
-      indices.get_mapping: {}
-
-  - is_true: test_1.mappings.type_1
-  - is_true: test_1.mappings.type_2
-  - is_true: test_2.mappings.type_2
-  - is_true: test_2.mappings.type_3
-
----
-"Get /{index}/_mapping":
-
-  - do:
-      indices.get_mapping:
-        index: test_1
-
-  - is_true: test_1.mappings.type_1
-  - is_true: test_1.mappings.type_2
-  - is_false: test_2
-
-
----
-"Get /{index}/_mapping/_all":
-
-  - do:
-      indices.get_mapping:
-        index: test_1
-        type: _all
-
-  - is_true: test_1.mappings.type_1
-  - is_true: test_1.mappings.type_2
-  - is_false: test_2
-
----
-"Get /{index}/_mapping/*":
-
-  - do:
-      indices.get_mapping:
-        index: test_1
-        type: '*'
-
-  - is_true: test_1.mappings.type_1
-  - is_true: test_1.mappings.type_2
-  - is_false: test_2
-
----
-"Get /{index}/_mapping/{type}":
-
-  - do:
-      indices.get_mapping:
-        index: test_1
-        type: type_1
-
-  - is_false: test_1.mappings.type_2
-  - is_false: test_2
-
----
-"Get /{index}/_mapping/{type,type}":
-
-  - do:
-      indices.get_mapping:
-        index: test_1
-        type: type_1,type_2
-
-  - is_true: test_1.mappings.type_1
-  - is_true: test_1.mappings.type_2
-  - is_false: test_2
-
----
-"Get /{index}/_mapping/{type*}":
-
-  - do:
-      indices.get_mapping:
-        index: test_1
-        type: '*2'
-
-  - is_true: test_1.mappings.type_2
-  - is_false: test_1.mappings.type_1
-  - is_false: test_2
-
----
-"Get /_mapping/{type}":
-
-  - do:
-      indices.get_mapping:
-        type: type_2
-
-  - is_true: test_1.mappings.type_2
-  - is_true: test_2.mappings.type_2
-  - is_false: test_1.mappings.type_1
-  - is_false: test_2.mappings.type_3
-
----
-"Get /_all/_mapping/{type}":
-
-  - do:
-      indices.get_mapping:
-        index: _all
-        type: type_2
-
-  - is_true: test_1.mappings.type_2
-  - is_true: test_2.mappings.type_2
-  - is_false: test_1.mappings.type_1
-  - is_false: test_2.mappings.type_3
-
----
-"Get /*/_mapping/{type}":
-
-  - do:
-      indices.get_mapping:
-        index: '*'
-        type: type_2
-
-  - is_true: test_1.mappings.type_2
-  - is_true: test_2.mappings.type_2
-  - is_false: test_1.mappings.type_1
-  - is_false: test_2.mappings.type_3
-
----
-"Get /index,index/_mapping/{type}":
-
-  - do:
-      indices.get_mapping:
-        index: test_1,test_2
-        type: type_2
-
-  - is_true: test_1.mappings.type_2
-  - is_true: test_2.mappings.type_2
-  - is_false: test_2.mappings.type_3
-
----
-"Get /index*/_mapping/{type}":
-
-  - do:
-      indices.get_mapping:
-        index: '*2'
-        type: type_2
-
-  - is_true: test_2.mappings.type_2
-  - is_false: test_1
-  - is_false: test_2.mappings.type_3
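
The 166-line file deleted above exercised `indices.get_mapping` with two mapping types per index and every index/type addressing combination, none of which a 6.x index can express. For contrast, a minimal single-type sketch of the same kind of assertion (the index and type names here are illustrative, not a reconstruction of any replacement test):

    ---
    "Get /{index}/_mapping, single type":
      - do:
          indices.create:
            index: test_1
            body:
              mappings:
                doc: {}            # hypothetical single mapping type
      - do:
          indices.get_mapping:
            index: test_1
      - is_true: test_1.mappings.doc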
@@ -11,10 +11,6 @@ setup:
 ---
 "Get template":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.get_template:
         name: test

@@ -25,10 +21,6 @@ setup:
 ---
 "Get all templates":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.put_template:
         name: test2

@@ -46,10 +38,6 @@ setup:
 ---
 "Get template with local flag":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.get_template:
         name: test

@@ -60,10 +48,6 @@ setup:
 ---
 "Get template with flat settings and master timeout":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.get_template:
         name: test

@@ -1,9 +1,5 @@
 ---
 "Basic test for index open/close":
-  - skip:
-      version: " - 5.99.99"
-      reason: status code on closed indices changed in 6.0.0 from 403 to 400
-
   - do:
       indices.create:
         index: test_index

@@ -23,10 +23,6 @@ setup:

 ---
 "All indices":
-  - skip:
-      version: " - 5.99.99"
-      reason: status code on closed indices changed in 6.0.0 from 403 to 400
-
   - do:
       indices.close:
         index: _all

@@ -50,10 +46,6 @@ setup:

 ---
 "Trailing wildcard":
-  - skip:
-      version: " - 5.99.99"
-      reason: status code on closed indices changed in 6.0.0 from 403 to 400
-
   - do:
       indices.close:
         index: test_*

@@ -77,10 +69,6 @@ setup:

 ---
 "Only wildcard":
-  - skip:
-      version: " - 5.99.99"
-      reason: status code on closed indices changed in 6.0.0 from 403 to 400
-
   - do:
       indices.close:
         index: '*'

@@ -30,9 +30,6 @@

 ---
 "Can't create alias with invalid characters":
-  - skip:
-      version: " - 5.0.99"
-      reason: alias name validation was introduced in 5.1.0

   - do:
       indices.create:

@@ -14,10 +14,6 @@ setup:

 ---
 "put alias per index":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.put_alias:
         index: test_index1

@@ -72,10 +68,6 @@ setup:

 ---
 "put alias prefix* index":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.put_alias:
         index: "test_*"

@@ -91,10 +83,6 @@ setup:

 ---
 "put alias in list of indices":
-  - skip:
-      version: " - 5.99.99"
-      reason: only requested indices are included in 6.x
-
   - do:
       indices.put_alias:
         index: "test_index1,test_index2"

@@ -1,10 +1,6 @@
 ---
 "Put template":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.put_template:
         name: test

@@ -25,10 +21,6 @@
 ---
 "Put multiple template":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.put_template:
         name: test

@@ -49,10 +41,6 @@
 ---
 "Put template with aliases":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.put_template:
         name: test

@@ -77,10 +65,6 @@
 ---
 "Put template create":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.put_template:
         name: test

@@ -113,10 +97,6 @@
 ---
 "Test Put Versioned Template":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new API that has been added in 6.0

   - do:
       indices.put_template:
         name: "my_template"

@@ -214,10 +194,6 @@
 ---
 "Put index template without index_patterns":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: the error message is updated in v6.0.0

   - do:
       catch: /index patterns are missing/
       indices.put_template:

@@ -76,10 +76,6 @@

 ---
 "Rollover no condition matched":
-  - skip:
-      version: " - 5.0.0"
-      reason: bug fixed in 5.0.1
-
   # create index with alias
   - do:
       indices.create:

@@ -108,10 +104,6 @@
 ---
 "Rollover with dry-run but target index exists":
-
-  - skip:
-      version: " - 5.0.0"
-      reason: bug fixed in 5.0.1 - dry run was returning just fine even if the index exists

   # create index with alias
   - do:
       indices.create:

@@ -43,10 +43,6 @@

 ---
 "closed segments test":
-  - skip:
-      version: " - 5.99.99"
-      reason: status code on closed indices changed in 6.0.0 from 403 to 400
-
   - do:
       indices.create:
         index: index1

@@ -1,9 +1,5 @@
 ---
 "Shrink index ignores target template mapping":
-  - skip:
-      version: " - 5.99.99"
-      reason: bug fixed in 5.6.0
-
   - do:
       cluster.state: {}
       # Get master node id

@@ -1,10 +1,6 @@
 ---
 "Index Sort":
-
-  - skip:
-      version: " - 5.99.99"
-      reason: this uses a new feature that has been added in 6.0.0

   - do:
       indices.create:
         index: test

@@ -102,9 +102,6 @@ setup:

 ---
 "Indices stats unrecognized parameter":
-  - skip:
-      version: " - 5.0.99"
-      reason: strict stats handling does not exist in 5.0
   - do:
       catch: bad_request
       indices.stats:
Some files were not shown because too many files have changed in this diff