Merge remote-tracking branch 'es/7.x' into enrich-7.x

Martijn van Groningen 2019-04-09 09:59:24 +02:00
commit 5a1d5cca4f
196 changed files with 2438 additions and 1879 deletions

View File

@@ -338,14 +338,6 @@ gradle.projectsEvaluated {
     integTest.mustRunAfter test
   }
   configurations.all { Configuration configuration ->
-    /*
-     * The featureAwarePlugin configuration has a dependency on x-pack:plugin:core and x-pack:plugin:core has a dependency on the
-     * featureAwarePlugin configuration. The below task ordering logic would force :x-pack:plugin:core:test and
-     * :x-pack:test:feature-aware:test to depend on each other circularly. We break that cycle here.
-     */
-    if (configuration.name == "featureAwarePlugin") {
-      return
-    }
     dependencies.all { Dependency dep ->
       Project upstreamProject = dependencyToProject(dep)
       if (upstreamProject != null) {
@@ -357,7 +349,7 @@ gradle.projectsEvaluated {
         Task task = project.tasks.findByName(taskName)
         Task upstreamTask = upstreamProject.tasks.findByName(taskName)
         if (task != null && upstreamTask != null) {
-          task.mustRunAfter(upstreamTask)
+          task.shouldRunAfter(upstreamTask)
         }
       }
     }
@@ -382,21 +374,6 @@ allprojects {
     // also ignore other possible build dirs
     excludeDirs += file('build')
     excludeDirs += file('build-eclipse')
-    iml {
-      // fix so that Gradle idea plugin properly generates support for resource folders
-      // see also https://issues.gradle.org/browse/GRADLE-2975
-      withXml {
-        it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/main/resources' }.each {
-          it.attributes().remove('isTestSource')
-          it.attributes().put('type', 'java-resource')
-        }
-        it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/test/resources' }.each {
-          it.attributes().remove('isTestSource')
-          it.attributes().put('type', 'java-test-resource')
-        }
-      }
-    }
   }
 }
@@ -414,14 +391,6 @@ idea {
     vcs = 'Git'
   }
 }
-// Make sure gradle idea was run before running anything in intellij (including import).
-File ideaMarker = new File(projectDir, '.local-idea-is-configured')
-tasks.idea.doLast {
-  ideaMarker.setText('', 'UTF-8')
-}
-if (System.getProperty('idea.active') != null && ideaMarker.exists() == false) {
-  throw new GradleException('You must run `./gradlew idea` from the root of elasticsearch before importing into IntelliJ')
-}
 // eclipse configuration
 allprojects {

View File

@@ -687,8 +687,7 @@ class ClusterFormationTasks {
   static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) {
     return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { Exec exec ->
       exec.workingDir node.cwd
-      // TODO: this must change to 7.0.0 after bundling java has been backported
-      if (project.isRuntimeJavaHomeSet || node.nodeVersion.before(Version.fromString("8.0.0")) ||
+      if (project.isRuntimeJavaHomeSet || node.nodeVersion.before(Version.fromString("7.0.0")) ||
           node.config.distribution == 'integ-test-zip') {
         exec.environment.put('JAVA_HOME', project.runtimeJavaHome)
       } else {
@@ -714,7 +713,7 @@ class ClusterFormationTasks {
       ant.exec(executable: node.executable, spawn: node.config.daemonize, newenvironment: true,
                dir: node.cwd, taskname: 'elasticsearch') {
         node.env.each { key, value -> env(key: key, value: value) }
-        if (project.isRuntimeJavaHomeSet || node.nodeVersion.before(Version.fromString("8.0.0")) ||
+        if (project.isRuntimeJavaHomeSet || node.nodeVersion.before(Version.fromString("7.0.0")) ||
             node.config.distribution == 'integ-test-zip') {
           env(key: 'JAVA_HOME', value: project.runtimeJavaHome)
         }

View File

@@ -32,7 +32,7 @@ bouncycastle = 1.61
 # test dependencies
 randomizedrunner = 2.7.1
 junit = 4.12
-httpclient = 4.5.7
+httpclient = 4.5.8
 httpcore = 4.4.11
 httpasyncclient = 4.1.4
 commonslogging = 1.1.3

View File

@@ -1 +0,0 @@
-dda059f4908e1b548b7ba68d81a3b05897f27cb0

View File

@@ -0,0 +1 @@
+c27c9d6f15435dc2b6947112027b418b0eef32b9

View File

@@ -1 +0,0 @@
-dda059f4908e1b548b7ba68d81a3b05897f27cb0

View File

@@ -0,0 +1 @@
+c27c9d6f15435dc2b6947112027b418b0eef32b9

View File

@@ -7,7 +7,7 @@ source "`dirname "$0"`"/elasticsearch-env
 IFS=';' read -r -a additional_sources <<< "$ES_ADDITIONAL_SOURCES"
 for additional_source in "${additional_sources[@]}"
 do
-  source "`dirname "$0"`"/$additional_source
+  source "$ES_HOME"/bin/$additional_source
 done
 IFS=';' read -r -a additional_classpath_directories <<< "$ES_ADDITIONAL_CLASSPATH_DIRECTORIES"

View File

@@ -117,6 +117,3 @@ ${error.file}
 # due to internationalization enhancements in JDK 9 Elasticsearch need to set the provider to COMPAT otherwise
 # time/date parsing will break in an incompatible way for some date patterns and locals
 9-:-Djava.locale.providers=COMPAT
-
-# temporary workaround for C2 bug with JDK 10 on hardware with AVX-512
-10-:-XX:UseAVX=2

View File

@@ -1,7 +1,10 @@
 [[java-query-dsl-type-query]]
 ==== Type Query
-deprecated[7.0.0, Types are being removed, prefer filtering on a field instead. For more information, please see {ref}/removal-of-types.html[Removal of mapping types].]
+deprecated[7.0.0]
+
+Types are being removed, prefer filtering on a field instead. For
+more information, see {ref}/removal-of-types.html[Removal of mapping types].
 See {ref}/query-dsl-type-query.html[Type Query]
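As an aside, the migration the note recommends (filtering on a regular field instead of `_type`) can be expressed with the Java query builders. A minimal sketch; the `tweet` value and the keyword field named `type` are hypothetical, not part of this commit:

import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class TypeFilterMigration {
    public static void main(String[] args) {
        // Deprecated in 7.0.0: matches documents by their mapping type
        QueryBuilder legacy = QueryBuilders.typeQuery("tweet");
        // Preferred: filter on an ordinary keyword field the application controls
        QueryBuilder replacement = QueryBuilders.termQuery("type", "tweet");
        System.out.println(legacy);
        System.out.println(replacement);
    }
}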

View File

@@ -15,7 +15,9 @@ our writing index. We wish to roll over the index after it reaches a size
 of 50 gigabytes, or has been created 30 days ago, and then delete the index
 after 90 days.
-=== Setting up a new policy
+[float]
+[[ilm-gs-create-policy]]
+=== Setting up a policy
 There are many new features introduced by {ilm-init}, but we will only focus on
 a few that are needed for our example. For starters, we will use the
@@ -64,6 +66,8 @@ the index being written to after it reaches 50 gigabytes, or it is 30
 days old. The rollover will occur when either of these conditions is true.
 The index will be deleted 90 days after it is rolled over.
+[float]
+[[ilm-gs-apply-policy]]
 === Applying a policy to our index
 There are <<set-up-lifecycle-policy,a few ways>> to associate a
@@ -135,6 +139,8 @@ index being the index that is being written to at a time. Rollover swaps
 the write index to be the new index created from rollover, and sets the
 alias to be read-only for the source index.
+[float]
+[[ilm-gs-check-progress]]
 === Checking progress
 Now that we have an index managed by our policy, how do we tell what is going
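For reference, the policy this guide describes (roll over at 50 gigabytes or 30 days, delete 90 days after rollover) can be created from Java with the low-level REST client. A minimal sketch, assuming a cluster on localhost and a hypothetical policy name `datastream_policy`:

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class CreateLifecyclePolicy {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Roll over at 50 GB or 30 days, whichever comes first; delete 90 days after rollover.
            Request request = new Request("PUT", "/_ilm/policy/datastream_policy");
            request.setJsonEntity(
                "{\"policy\":{\"phases\":{"
                    + "\"hot\":{\"actions\":{\"rollover\":{\"max_size\":\"50GB\",\"max_age\":\"30d\"}}},"
                    + "\"delete\":{\"min_age\":\"90d\",\"actions\":{\"delete\":{}}}}}}");
            client.performRequest(request);
        }
    }
}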

View File

@@ -285,6 +285,12 @@ Other index settings are available in index modules:
 Control over the transaction log and background flush operations.
+[float]
+=== [xpack]#{xpack} index settings#
+
+<<ilm-settings,{ilm-cap}>>::
+
+Specify the lifecycle policy and rollover alias for an index.
 --
 include::index-modules/analysis.asciidoc[]

View File

@@ -137,7 +137,7 @@ understands this to mean `2016-04-01` as is explained in the <<date-math-index-n
 | `field` | yes | - | The field to get the date or timestamp from.
 | `index_name_prefix` | no | - | A prefix of the index name to be prepended before the printed date. Supports <<accessing-template-fields,template snippets>>.
 | `date_rounding` | yes | - | How to round the date when formatting the date into the index name. Valid values are: `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). Supports <<accessing-template-fields,template snippets>>.
-| `date_formats` | no | yyyy-MM-dd'T'HH:mm:ss.SSSZ | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.
+| `date_formats` | no | yyyy-MM-dd'T'HH:mm:ss.SSSXX | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.
 | `timezone` | no | UTC | The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names.
 | `locale` | no | ENGLISH | The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days.
 | `index_name_format` | no | yyyy-MM-dd | The format to be used when printing the parsed date into the index name. A valid java time pattern is expected here. Supports <<accessing-template-fields,template snippets>>.
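To make the table concrete, here is roughly how a pipeline using this processor could be created through the low-level REST client. The pipeline id `monthlyindex` and the field and prefix values are chosen purely for illustration:

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class DateIndexNamePipeline {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Rounds the date found in `date1` to the month, so with the default
            // index_name_format a document dated 2016-04-25 lands in my-index-2016-04-01.
            Request request = new Request("PUT", "/_ingest/pipeline/monthlyindex");
            request.setJsonEntity(
                "{\"description\":\"monthly date-time index naming\","
                    + "\"processors\":[{\"date_index_name\":{"
                    + "\"field\":\"date1\",\"index_name_prefix\":\"my-index-\",\"date_rounding\":\"M\"}}]}");
            client.performRequest(request);
        }
    }
}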

View File

@@ -5,7 +5,7 @@ experimental[]
 A `dense_vector` field stores dense vectors of float values.
 The maximum number of dimensions that can be in a vector should
-not exceed 500. The number of dimensions can be
+not exceed 1024. The number of dimensions can be
 different across documents. A `dense_vector` field is
 a single-valued field.
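A minimal sketch of exercising this field type through the low-level REST client; the index name `my_index` and field name `my_vector` are illustrative, not from this commit:

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class DenseVectorExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Map a dense_vector field (at most 1024 dimensions per the docs above)
            Request mapping = new Request("PUT", "/my_index");
            mapping.setJsonEntity(
                "{\"mappings\":{\"properties\":{\"my_vector\":{\"type\":\"dense_vector\"}}}}");
            client.performRequest(mapping);
            // Index a document carrying a three-dimensional vector
            Request doc = new Request("PUT", "/my_index/_doc/1");
            doc.setJsonEntity("{\"my_vector\":[0.5, 10.0, 6.0]}");
            client.performRequest(doc);
        }
    }
}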

View File

@@ -5,7 +5,7 @@ experimental[]
 A `sparse_vector` field stores sparse vectors of float values.
 The maximum number of dimensions that can be in a vector should
-not exceed 500. The number of dimensions can be
+not exceed 1024. The number of dimensions can be
 different across documents. A `sparse_vector` field is
 a single-valued field.

View File

@@ -9,7 +9,6 @@ your application to Elasticsearch 7.0.
 See also <<release-highlights>> and <<es-release-notes>>.
-* <<breaking_70_notable>>
 * <<breaking_70_aggregations_changes>>
 * <<breaking_70_cluster_changes>>
 * <<breaking_70_discovery_changes>>
@@ -32,13 +31,7 @@ See also <<release-highlights>> and <<es-release-notes>>.
 * <<breaking_70_java_time_changes>>
 [float]
-[[breaking_70_notable]]
-=== Notable changes
-
-// NOTE: The content in this section is also used in the Installation and Upgrade Guide.
-//tag::notable-breaking-changes[]
-[float]
-==== Indices created before 7.0
+=== Indices created before 7.0
 Elasticsearch 7.0 can read indices created in version 6.0 or above. An
 Elasticsearch 7.0 node will not start in the presence of indices created in a
@@ -53,8 +46,6 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x.
 =========================================
-
-// end::notable-breaking-changes[]
 include::migrate_7_0/aggregations.asciidoc[]
 include::migrate_7_0/analysis.asciidoc[]
 include::migrate_7_0/cluster.asciidoc[]

View File

@@ -2,6 +2,14 @@
 [[breaking_70_aggregations_changes]]
 === Aggregations changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== Deprecated `global_ordinals_hash` and `global_ordinals_low_cardinality` execution hints for terms aggregations have been removed

View File

@@ -2,6 +2,13 @@
 [[breaking_70_analysis_changes]]
 === Analysis changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== Limiting the number of tokens produced by _analyze

View File

@@ -2,6 +2,14 @@
 [[breaking_70_api_changes]]
 === API changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== Internal Versioning is no longer supported for optimistic concurrency control

View File

@@ -2,6 +2,13 @@
 [[breaking_70_cluster_changes]]
 === Cluster changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== `:` is no longer allowed in cluster name

View File

@@ -2,6 +2,13 @@
 [[breaking_70_discovery_changes]]
 === Discovery changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== Cluster bootstrapping is required if discovery is configured

View File

@@ -2,6 +2,13 @@
 [[breaking_70_indices_changes]]
 === Indices changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== Index creation no longer defaults to five shards
 Previous versions of Elasticsearch defaulted to creating five shards per index.

View File

@@ -2,6 +2,13 @@
 [[breaking_70_java_changes]]
 === Java API changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== `isShardsAcked` deprecated in `6.2` has been removed

View File

@@ -1,3 +1,10 @@
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 [[breaking_70_java_time_changes]]
 === Replacing Joda-Time with java time

View File

@@ -2,6 +2,13 @@
 [[breaking_70_logging_changes]]
 === Logging changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== New JSON format log files in `log` directory

View File

@@ -2,6 +2,13 @@
 [[breaking_70_low_level_restclient_changes]]
 === Low-level REST client changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== Support for `maxRetryTimeout` removed from RestClient

View File

@@ -2,6 +2,13 @@
 [[breaking_70_mappings_changes]]
 === Mapping changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== The `_all` meta field is removed

View File

@@ -2,6 +2,13 @@
 [[breaking_70_node_changes]]
 === Node start up
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== Nodes with left-behind data or metadata refuse to start
 Repurposing an existing node by changing node.master or node.data to false can leave lingering on-disk metadata and

View File

@@ -2,6 +2,13 @@
 [[breaking_70_packaging_changes]]
 === Packaging changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 [[systemd-service-file-config]]
 ==== systemd service file is no longer configuration

View File

@@ -2,6 +2,13 @@
 [[breaking_70_plugins_changes]]
 === Plugins changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== Azure Repository plugin

View File

@@ -2,6 +2,13 @@
 [[breaking_70_restclient_changes]]
 === High-level REST client changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== API methods accepting `Header` argument have been removed

View File

@@ -2,6 +2,14 @@
 [[breaking_70_scripting_changes]]
 === Scripting changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== getDate() and getDates() removed

View File

@@ -2,6 +2,13 @@
 [[breaking_70_search_changes]]
 === Search and Query DSL changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== Off-heap terms index

View File

@@ -2,6 +2,13 @@
 [[breaking_70_settings_changes]]
 === Settings changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== The default for `node.name` is now the hostname

View File

@@ -2,6 +2,13 @@
 [[breaking_70_snapshotstats_changes]]
 === Snapshot stats changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 Snapshot stats details are provided in a new structured way:
 * `total` section for all the files that are referenced by the snapshot.

View File

@@ -2,6 +2,13 @@
 [[breaking_70_suggesters_changes]]
 === Suggesters changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+//tag::notable-breaking-changes[]
+
+// end::notable-breaking-changes[]
+
 [float]
 ==== Registration of suggesters in plugins has changed

View File

@@ -81,7 +81,7 @@ run the following command:
 ["source","sh",subs="attributes,callouts"]
 ----------------------------------------------------------------------
-metricbeat modules enable elasticsearch
+metricbeat modules enable elasticsearch-xpack
 ----------------------------------------------------------------------
 For more information, see
@@ -89,32 +89,9 @@ For more information, see
 {metricbeat-ref}/metricbeat-module-elasticsearch.html[{es} module].
 --
-.. Configure the {es} module in {metricbeat}. +
-+
---
-You must specify the following settings in the `modules.d/elasticsearch.yml` file:
-
-[source,yaml]
-----------------------------------
-- module: elasticsearch
-  metricsets:
-    - ccr
-    - cluster_stats
-    - index
-    - index_recovery
-    - index_summary
-    - ml_job
-    - node_stats
-    - shard
-  period: 10s
-  hosts: ["http://localhost:9200"] <1>
-  xpack.enabled: true <2>
-----------------------------------
-<1> This setting identifies the host and port number that are used to access {es}.
-<2> This setting ensures that {kib} can read this monitoring data successfully.
-That is to say, it's stored in the same location and format as monitoring data
-that is sent by <<es-monitoring-exporters,exporters>>.
---
+.. By default the module will collect {es} monitoring metrics from `http://localhost:9200`.
+If the local {es} node has a different address, you must specify it via the `hosts` setting
+in the `modules.d/elasticsearch-xpack.yml` file.
 .. If Elastic {security-features} are enabled, you must also provide a user ID
 and password so that {metricbeat} can collect metrics successfully.
@@ -127,7 +104,7 @@ Alternatively, use the {stack-ov}/built-in-users.html[`remote_monitoring_user` b
 file.
 +
 --
-For example, add the following settings in the `modules.d/elasticsearch.yml` file:
+For example, add the following settings in the `modules.d/elasticsearch-xpack.yml` file:
 [source,yaml]
 ----------------------------------
@@ -140,7 +117,7 @@ For example, add the following settings in the `modules.d/elasticsearch.yml` fil
 .. If you configured {es} to use <<configuring-tls,encrypted communications>>,
 you must access it via HTTPS. For example, use a `hosts` setting like
-`https://localhost:9200` in the `modules.d/elasticsearch.yml` file.
+`https://localhost:9200` in the `modules.d/elasticsearch-xpack.yml` file.
 .. Identify where to send the monitoring data. +
 +

View File

@@ -6,6 +6,7 @@
 This section summarizes the changes in each release.
+* <<release-notes-7.0.0>>
 * <<release-notes-7.0.0-rc2>>
 * <<release-notes-7.0.0-rc1>>
 * <<release-notes-7.0.0-beta1>>
@@ -14,6 +15,7 @@ This section summarizes the changes in each release.
 --
+include::release-notes/7.0.0.asciidoc[]
 include::release-notes/7.0.0-rc2.asciidoc[]
 include::release-notes/7.0.0-rc1.asciidoc[]
 include::release-notes/7.0.0-beta1.asciidoc[]

View File

@@ -0,0 +1,10 @@
+[[release-notes-7.0.0]]
+== {es} version 7.0.0
+
+coming[7.0.0]
+
+//These release notes include all changes made in the alpha, beta, and RC
+//releases of 7.0.0.
+
+//Also see <<breaking-changes-7.0,Breaking changes in 7.0.0>>.

View File

@@ -0,0 +1,15 @@
+[role="xpack"]
+[[ilm-settings]]
+=== {ilm-cap} settings
+
+These index-level {ilm-init} settings are typically configured through index
+templates. For more information, see <<ilm-gs-create-policy>>.
+
+`index.lifecycle.name`::
+The name of the policy to use to manage the index.
+
+`index.lifecycle.rollover_alias`::
+The index alias to update when the index rolls over. Specify when using a
+policy that contains a rollover action. When the index rolls over, the alias is
+updated to reflect that the index is no longer the write index. For more
+information about rollover, see <<using-policies-rollover>>.
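These two settings are typically delivered through an index template so that each rolled-over index inherits them. A minimal sketch using the low-level REST client; the template, policy, and alias names are hypothetical:

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class ApplyLifecycleTemplate {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Every index matching datastream-* picks up the policy and rollover alias at creation time.
            Request request = new Request("PUT", "/_template/datastream_template");
            request.setJsonEntity(
                "{\"index_patterns\":[\"datastream-*\"],"
                    + "\"settings\":{"
                    + "\"index.lifecycle.name\":\"datastream_policy\","
                    + "\"index.lifecycle.rollover_alias\":\"datastream\"}}");
            client.performRequest(request);
        }
    }
}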

View File

@@ -52,6 +52,8 @@ include::settings/audit-settings.asciidoc[]
 include::settings/ccr-settings.asciidoc[]
+include::settings/ilm-settings.asciidoc[]
+
 include::settings/license-settings.asciidoc[]
 include::settings/ml-settings.asciidoc[]

View File

@@ -5,8 +5,9 @@
 The images use https://hub.docker.com/_/centos/[centos:7] as the base image.
 A list of all published Docker images and tags is available at
-https://www.docker.elastic.co[www.docker.elastic.co]. The source code is in
-https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub].
+https://www.docker.elastic.co[www.docker.elastic.co]. The source files
+are in
+https://github.com/elastic/elasticsearch/blob/{branch}/distribution/docker[Github].
 These images are free to use under the Elastic license. They contain open source
 and free commercial features and access to paid commercial features.

View File

@@ -37,7 +37,6 @@ deprecation warnings are logged when the log level is set to `WARN`.
   to your code and configuration for {version}.
 . If you use custom plugins, make sure compatible versions are available.
 . Test upgrades in a dev environment before upgrading your production cluster.
-  before upgrading.
 . <<modules-snapshots,Back up your data!>> You must have a snapshot of your
   data to roll back to an earlier version.

View File

@@ -107,7 +107,7 @@ You can check progress by submitting a <<cat-health,`_cat/health`>> request:
 [source,sh]
 --------------------------------------------------
-GET _cat/health
+GET _cat/health?v
 --------------------------------------------------
 // CONSOLE

View File

@@ -240,30 +240,3 @@ class org.elasticsearch.index.query.IntervalFilterScript$Interval {
   int getEnd()
   int getGaps()
 }
-
-# for testing
-class org.elasticsearch.painless.FeatureTest no_import {
-  int z
-  ()
-  (int,int)
-  int getX()
-  int getY()
-  Integer getI()
-  void setX(int)
-  void setY(int)
-  void setI(Integer)
-  boolean overloadedStatic()
-  boolean overloadedStatic(boolean)
-  int staticNumberTest(Number)
-  Double mixedAdd(int, Byte, char, Float)
-  Object twoFunctionsOfX(Function,Function)
-  void listInput(List)
-  int org.elasticsearch.painless.FeatureTestAugmentation getTotal()
-  int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int)
-}
-
-# for testing
-static_import {
-  int staticAddIntsTest(int, int) from_class org.elasticsearch.painless.StaticTest
-  float staticAddFloatsTest(float, float) from_class org.elasticsearch.painless.FeatureTest
-}

View File

@@ -190,13 +190,13 @@ public class AugmentationTests extends ScriptTestCase {
     }
     public void testFeatureTest() {
-        assertEquals(5, exec("org.elasticsearch.painless.FeatureTest ft = new org.elasticsearch.painless.FeatureTest();" +
+        assertEquals(5, exec("org.elasticsearch.painless.FeatureTestObject ft = new org.elasticsearch.painless.FeatureTestObject();" +
                 " ft.setX(3); ft.setY(2); return ft.getTotal()"));
-        assertEquals(5, exec("def ft = new org.elasticsearch.painless.FeatureTest();" +
+        assertEquals(5, exec("def ft = new org.elasticsearch.painless.FeatureTestObject();" +
                 " ft.setX(3); ft.setY(2); return ft.getTotal()"));
-        assertEquals(8, exec("org.elasticsearch.painless.FeatureTest ft = new org.elasticsearch.painless.FeatureTest();" +
+        assertEquals(8, exec("org.elasticsearch.painless.FeatureTestObject ft = new org.elasticsearch.painless.FeatureTestObject();" +
                 " ft.setX(3); ft.setY(2); return ft.addToTotal(3)"));
-        assertEquals(8, exec("def ft = new org.elasticsearch.painless.FeatureTest();" +
+        assertEquals(8, exec("def ft = new org.elasticsearch.painless.FeatureTestObject();" +
                 " ft.setX(3); ft.setY(2); return ft.addToTotal(3)"));
     }
 }

View File

@@ -127,7 +127,7 @@ public class BasicAPITests extends ScriptTestCase {
     }
     public void testPublicMemberAccess() {
-        assertEquals(5, exec("org.elasticsearch.painless.FeatureTest ft = new org.elasticsearch.painless.FeatureTest();" +
+        assertEquals(5, exec("org.elasticsearch.painless.FeatureTestObject ft = new org.elasticsearch.painless.FeatureTestObject();" +
                 "ft.z = 5; return ft.z;"));
     }

View File

@@ -191,11 +191,11 @@ public class BasicExpressionTests extends ScriptTestCase {
         assertNull(     exec("def a = null; return a?.length()"));
         assertEquals(3, exec("def a = 'foo'; return a?.length()"));
         // Read shortcut
-        assertMustBeNullable( "org.elasticsearch.painless.FeatureTest a = null; return a?.x");
+        assertMustBeNullable( "org.elasticsearch.painless.FeatureTestObject a = null; return a?.x");
         assertMustBeNullable(
-            "org.elasticsearch.painless.FeatureTest a = new org.elasticsearch.painless.FeatureTest(); return a?.x");
+            "org.elasticsearch.painless.FeatureTestObject a = new org.elasticsearch.painless.FeatureTestObject(); return a?.x");
         assertNull(     exec("def a = null; return a?.x"));
-        assertEquals(0, exec("def a = new org.elasticsearch.painless.FeatureTest(); return a?.x"));
+        assertEquals(0, exec("def a = new org.elasticsearch.painless.FeatureTestObject(); return a?.x"));
         // Maps
         // Call
@@ -222,7 +222,7 @@ public class BasicExpressionTests extends ScriptTestCase {
         assertEquals(2, exec("def a = new int[] {2, 3}; return a?.length"));
         // Results from maps (should just work but let's test anyway)
-        FeatureTest t = new FeatureTest();
+        FeatureTestObject t = new FeatureTestObject();
         assertNull(     exec("Map a = ['thing': params.t]; return a.other?.getX()", singletonMap("t", t), true));
         assertNull(     exec("Map a = ['thing': params.t]; return a.other?.x", singletonMap("t", t), true));
         assertNull(     exec("def a = ['thing': params.t]; return a.other?.getX()", singletonMap("t", t), true));
@@ -254,8 +254,8 @@ public class BasicExpressionTests extends ScriptTestCase {
                 + "return a.missing_length", true));
         // Writes, all unsupported at this point
-//        assertEquals(null, exec("org.elasticsearch.painless.FeatureTest a = null; return a?.x")); // Read field
-//        assertEquals(null, exec("org.elasticsearch.painless.FeatureTest a = null; a?.x = 7; return a?.x")); // Write field
+//        assertEquals(null, exec("org.elasticsearch.painless.FeatureTestObject a = null; return a?.x")); // Read field
+//        assertEquals(null, exec("org.elasticsearch.painless.FeatureTestObject a = null; a?.x = 7; return a?.x")); // Write field
//        assertEquals(null, exec("Map a = null; a?.other = 'wow'; return a?.other")); // Write shortcut
//        assertEquals(null, exec("def a = null; a?.other = 'cat'; return a?.other")); // Write shortcut
//        assertEquals(null, exec("Map a = ['thing': 'bar']; a.other?.cat = 'no'; return a.other?.cat"));

View File

@@ -19,14 +19,14 @@
 package org.elasticsearch.painless;
-public class FeatureTestAugmentation {
-    public static int getTotal(FeatureTest ft) {
+public class FeatureTestAugmentationObject {
+    public static int getTotal(FeatureTestObject ft) {
         return ft.getX() + ft.getY();
     }
-    public static int addToTotal(FeatureTest ft, int add) {
+    public static int addToTotal(FeatureTestObject ft, int add) {
         return getTotal(ft) + add;
     }
-    private FeatureTestAugmentation() {}
+    private FeatureTestAugmentationObject() {}
 }

View File

@@ -23,7 +23,7 @@ import java.util.function.Function;
  */
 /** Currently just a dummy class for testing a few features not yet exposed by whitelist! */
-public class FeatureTest {
+public class FeatureTestObject {
     /** static method that returns true */
     public static boolean overloadedStatic() {
         return true;
@@ -51,11 +51,11 @@ public class FeatureTest {
     private Integer i;
     /** empty ctor */
-    public FeatureTest() {
+    public FeatureTestObject() {
     }
     /** ctor with params */
-    public FeatureTest(int x, int y) {
+    public FeatureTestObject(int x, int y) {
         this.x = x;
         this.y = y;
     }

View File

@@ -46,12 +46,12 @@ public class FunctionRefTests extends ScriptTestCase {
     public void testQualifiedStaticMethodReference() {
         assertEquals(true,
-                exec("List l = [true]; l.stream().map(org.elasticsearch.painless.FeatureTest::overloadedStatic).findFirst().get()"));
+                exec("List l = [true]; l.stream().map(org.elasticsearch.painless.FeatureTestObject::overloadedStatic).findFirst().get()"));
     }
     public void testQualifiedStaticMethodReferenceDef() {
         assertEquals(true,
-                exec("def l = [true]; l.stream().map(org.elasticsearch.painless.FeatureTest::overloadedStatic).findFirst().get()"));
+                exec("def l = [true]; l.stream().map(org.elasticsearch.painless.FeatureTestObject::overloadedStatic).findFirst().get()"));
     }
     public void testQualifiedVirtualMethodReference() {
@@ -133,7 +133,7 @@ public class FunctionRefTests extends ScriptTestCase {
         assertEquals("testingcdefg", exec(
                 "String x = 'testing';" +
                 "String y = 'abcdefg';" +
-                "org.elasticsearch.painless.FeatureTest test = new org.elasticsearch.painless.FeatureTest(2,3);" +
+                "org.elasticsearch.painless.FeatureTestObject test = new org.elasticsearch.painless.FeatureTestObject(2,3);" +
                 "return test.twoFunctionsOfX(x::concat, y::substring);"));
     }
@@ -141,7 +141,7 @@ public class FunctionRefTests extends ScriptTestCase {
         assertEquals("testingcdefg", exec(
                 "def x = 'testing';" +
                 "def y = 'abcdefg';" +
-                "org.elasticsearch.painless.FeatureTest test = new org.elasticsearch.painless.FeatureTest(2,3);" +
+                "org.elasticsearch.painless.FeatureTestObject test = new org.elasticsearch.painless.FeatureTestObject(2,3);" +
                 "return test.twoFunctionsOfX(x::concat, y::substring);"));
     }
@@ -149,7 +149,7 @@ public class FunctionRefTests extends ScriptTestCase {
         assertEquals("testingcdefg", exec(
                 "String x = 'testing';" +
                 "String y = 'abcdefg';" +
-                "def test = new org.elasticsearch.painless.FeatureTest(2,3);" +
+                "def test = new org.elasticsearch.painless.FeatureTestObject(2,3);" +
                 "return test.twoFunctionsOfX(x::concat, y::substring);"));
     }
@@ -157,7 +157,7 @@ public class FunctionRefTests extends ScriptTestCase {
         assertEquals("testingcdefg", exec(
                 "def x = 'testing';" +
                 "def y = 'abcdefg';" +
-                "def test = new org.elasticsearch.painless.FeatureTest(2,3);" +
+                "def test = new org.elasticsearch.painless.FeatureTestObject(2,3);" +
                 "return test.twoFunctionsOfX(x::concat, y::substring);"));
     }

View File

@@ -333,15 +333,15 @@ public class GeneralCastTests extends ScriptTestCase {
         assertEquals(1, exec("def y = 2.0; y.compareTo(1);"));
         assertEquals(1, exec("int x = 1; def y = 2.0; y.compareTo(x);"));
         assertEquals(-1, exec("Integer x = Integer.valueOf(3); def y = 2.0; y.compareTo(x);"));
-        assertEquals(2, exec("def f = new org.elasticsearch.painless.FeatureTest(); f.i = (byte)2; f.i"));
+        assertEquals(2, exec("def f = new org.elasticsearch.painless.FeatureTestObject(); f.i = (byte)2; f.i"));
         assertEquals(4.0, exec(
-                "def x = new org.elasticsearch.painless.FeatureTest(); " +
+                "def x = new org.elasticsearch.painless.FeatureTestObject(); " +
                 "Byte i = Byte.valueOf(3); " +
                 "byte j = 1;" +
                 "Short s = Short.valueOf(-2);" +
                 "x.mixedAdd(j, i, (char)2, s)"
         ));
-        assertNull(exec("def f = new org.elasticsearch.painless.FeatureTest(); f.i = null; f.i"));
+        assertNull(exec("def f = new org.elasticsearch.painless.FeatureTestObject(); f.i = null; f.i"));
         expectScriptThrows(ClassCastException.class, () -> exec("def x = 2.0; def y = 1; y.compareTo(x);"));
         expectScriptThrows(ClassCastException.class, () -> exec("float f = 1.0f; def y = 1; y.compareTo(f);"));
     }

View File

@@ -112,7 +112,7 @@ public class LambdaTests extends ScriptTestCase {
     public void testTwoLambdas() {
         assertEquals("testingcdefg", exec(
-                "org.elasticsearch.painless.FeatureTest test = new org.elasticsearch.painless.FeatureTest(2,3);" +
+                "org.elasticsearch.painless.FeatureTestObject test = new org.elasticsearch.painless.FeatureTestObject(2,3);" +
                 "return test.twoFunctionsOfX(x -> 'testing'.concat(x), y -> 'abcdefg'.substring(y))"));
     }

View File

@@ -41,14 +41,14 @@ public class OverloadTests extends ScriptTestCase {
     }
     public void testConstructor() {
-        assertEquals(true, exec("org.elasticsearch.painless.FeatureTest f = new org.elasticsearch.painless.FeatureTest();" +
+        assertEquals(true, exec("org.elasticsearch.painless.FeatureTestObject f = new org.elasticsearch.painless.FeatureTestObject();" +
                 "return f.x == 0 && f.y == 0;"));
-        assertEquals(true, exec("org.elasticsearch.painless.FeatureTest f = new org.elasticsearch.painless.FeatureTest(1, 2);" +
+        assertEquals(true, exec("org.elasticsearch.painless.FeatureTestObject f = new org.elasticsearch.painless.FeatureTestObject(1, 2);" +
                 "return f.x == 1 && f.y == 2;"));
     }
     public void testStatic() {
-        assertEquals(true, exec("return org.elasticsearch.painless.FeatureTest.overloadedStatic();"));
-        assertEquals(false, exec("return org.elasticsearch.painless.FeatureTest.overloadedStatic(false);"));
+        assertEquals(true, exec("return org.elasticsearch.painless.FeatureTestObject.overloadedStatic();"));
+        assertEquals(false, exec("return org.elasticsearch.painless.FeatureTestObject.overloadedStatic(false);"));
     }
 }

View File

@@ -19,8 +19,8 @@
 package org.elasticsearch.painless;
-import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.painless.lookup.PainlessClass;

View File

@@ -22,14 +22,14 @@ package org.elasticsearch.painless;
 import junit.framework.AssertionFailedError;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.painless.antlr.Walker;
-import org.elasticsearch.painless.lookup.PainlessLookup;
-import org.elasticsearch.painless.lookup.PainlessLookupBuilder;
 import org.elasticsearch.painless.spi.Whitelist;
+import org.elasticsearch.painless.spi.WhitelistLoader;
 import org.elasticsearch.script.ScriptContext;
 import org.elasticsearch.script.ScriptException;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.Before;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -45,8 +45,6 @@ import static org.hamcrest.Matchers.hasSize;
  * Typically just asserts the output of {@code exec()}
  */
 public abstract class ScriptTestCase extends ESTestCase {
-    private static final PainlessLookup PAINLESS_LOOKUP = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS);
-
     protected PainlessScriptEngine scriptEngine;
     @Before
@@ -66,7 +64,9 @@ public abstract class ScriptTestCase extends ESTestCase {
     protected Map<ScriptContext<?>, List<Whitelist>> scriptContexts() {
         Map<ScriptContext<?>, List<Whitelist>> contexts = new HashMap<>();
-        contexts.put(PainlessTestScript.CONTEXT, Whitelist.BASE_WHITELISTS);
+        List<Whitelist> whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS);
+        whitelists.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.painless.test"));
+        contexts.put(PainlessTestScript.CONTEXT, whitelists);
         return contexts;
     }
@@ -91,12 +91,13 @@ public abstract class ScriptTestCase extends ESTestCase {
     public Object exec(String script, Map<String, Object> vars, Map<String,String> compileParams, boolean picky) {
         // test for ambiguity errors before running the actual script if picky is true
         if (picky) {
-            ScriptClassInfo scriptClassInfo = new ScriptClassInfo(PAINLESS_LOOKUP, PainlessTestScript.class);
+            ScriptClassInfo scriptClassInfo =
+                new ScriptClassInfo(scriptEngine.getContextsToLookups().get(PainlessTestScript.CONTEXT), PainlessTestScript.class);
             CompilerSettings pickySettings = new CompilerSettings();
             pickySettings.setPicky(true);
             pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings()));
-            Walker.buildPainlessTree(
-                scriptClassInfo, new MainMethodReserved(), getTestName(), script, pickySettings, PAINLESS_LOOKUP, null);
+            Walker.buildPainlessTree(scriptClassInfo, new MainMethodReserved(), getTestName(), script, pickySettings,
+                scriptEngine.getContextsToLookups().get(PainlessTestScript.CONTEXT), null);
         }
         // test actual script execution
         PainlessTestScript.Factory factory = scriptEngine.compile(null, script, PainlessTestScript.CONTEXT, compileParams);

View File

@@ -19,7 +19,7 @@
 package org.elasticsearch.painless;
-public class StaticTest {
+public class StaticTestObject {
     public static int staticAddIntsTest(int x, int y) {
         return x + y;
     }

View File

@@ -20,12 +20,12 @@
 package org.elasticsearch.painless.node;
 import org.elasticsearch.painless.CompilerSettings;
-import org.elasticsearch.painless.FeatureTest;
+import org.elasticsearch.painless.FeatureTestObject;
 import org.elasticsearch.painless.Locals.Variable;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.Operation;
-import org.elasticsearch.painless.action.PainlessExecuteAction.PainlessTestScript;
 import org.elasticsearch.painless.ScriptClassInfo;
+import org.elasticsearch.painless.action.PainlessExecuteAction.PainlessTestScript;
 import org.elasticsearch.painless.antlr.Walker;
 import org.elasticsearch.painless.lookup.PainlessCast;
 import org.elasticsearch.painless.lookup.PainlessClass;
@@ -35,8 +35,10 @@ import org.elasticsearch.painless.lookup.PainlessLookupBuilder;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.spi.Whitelist;
+import org.elasticsearch.painless.spi.WhitelistLoader;
 import org.elasticsearch.test.ESTestCase;
+
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
@@ -49,7 +51,6 @@ import static org.elasticsearch.painless.node.SSource.MainMethodReserved;
  * Tests {@link Object#toString} implementations on all extensions of {@link ANode}.
  */
 public class NodeToStringTests extends ESTestCase {
-    private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS);
     public void testEAssignment() {
         assertToString(
@@ -379,10 +380,11 @@ public class NodeToStringTests extends ESTestCase {
                 + "return a.length");
         assertToString(
                 "(SSource\n"
-                + " (SDeclBlock (SDeclaration org.elasticsearch.painless.FeatureTest a (ENewObj org.elasticsearch.painless.FeatureTest)))\n"
+                + " (SDeclBlock (SDeclaration org.elasticsearch.painless.FeatureTestObject a"
+                + " (ENewObj org.elasticsearch.painless.FeatureTestObject)))\n"
                + " (SExpression (EAssignment (PField (EVariable a) x) = (ENumeric 10)))\n"
                + " (SReturn (PField (EVariable a) x)))",
-                "org.elasticsearch.painless.FeatureTest a = new org.elasticsearch.painless.FeatureTest();\n"
+                "org.elasticsearch.painless.FeatureTestObject a = new org.elasticsearch.painless.FeatureTestObject();\n"
                + "a.x = 10;\n"
                + "return a.x");
     }
@@ -497,10 +499,10 @@ public class NodeToStringTests extends ESTestCase {
     public void testPSubShortcut() {
         Location l = new Location(getTestName(), 0);
-        PainlessClass s = painlessLookup.lookupPainlessClass(FeatureTest.class);
+        PainlessClass s = painlessLookup.lookupPainlessClass(FeatureTestObject.class);
         PainlessMethod getter = s.methods.get(PainlessLookupUtility.buildPainlessMethodKey("getX", 0));
         PainlessMethod setter = s.methods.get(PainlessLookupUtility.buildPainlessMethodKey("setX", 1));
-        PSubShortcut node = new PSubShortcut(l, "x", FeatureTest.class.getName(), getter, setter);
+        PSubShortcut node = new PSubShortcut(l, "x", FeatureTestObject.class.getName(), getter, setter);
         node.prefix = new EVariable(l, "a");
         assertEquals("(PSubShortcut (EVariable a) x)", node.toString());
         assertEquals("(PSubNullSafeCallInvoke (PSubShortcut (EVariable a) x))",
@@ -892,6 +894,14 @@ public class NodeToStringTests extends ESTestCase {
                 + "}");
     }
+    private final PainlessLookup painlessLookup;
+
+    public NodeToStringTests() {
+        List<Whitelist> whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS);
+        whitelists.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.painless.test"));
+        painlessLookup = PainlessLookupBuilder.buildFromWhitelists(whitelists);
+    }
+
     private void assertToString(String expected, String code) {
         assertEquals(expected, walk(code).toString());
     }

View File

@@ -2,7 +2,29 @@
 class org.elasticsearch.painless.BindingsTests$BindingsTestScript {
 }
+class org.elasticsearch.painless.FeatureTestObject no_import {
+  int z
+  ()
+  (int,int)
+  int getX()
+  int getY()
+  Integer getI()
+  void setX(int)
+  void setY(int)
+  void setI(Integer)
+  boolean overloadedStatic()
+  boolean overloadedStatic(boolean)
+  int staticNumberTest(Number)
+  Double mixedAdd(int, Byte, char, Float)
+  Object twoFunctionsOfX(Function,Function)
+  void listInput(List)
+  int org.elasticsearch.painless.FeatureTestAugmentationObject getTotal()
+  int org.elasticsearch.painless.FeatureTestAugmentationObject addToTotal(int)
+}
+
 static_import {
+  int staticAddIntsTest(int, int) from_class org.elasticsearch.painless.StaticTestObject
+  float staticAddFloatsTest(float, float) from_class org.elasticsearch.painless.FeatureTestObject
   int addWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingsTests$BindingTestClass
   int addThisWithState(BindingsTests.BindingsTestScript, int, int, int, double) bound_to org.elasticsearch.painless.BindingsTests$ThisBindingTestClass
   int addEmptyThisWithState(BindingsTests.BindingsTestScript, int) bound_to org.elasticsearch.painless.BindingsTests$EmptyThisBindingTestClass
@@ -95,6 +95,11 @@ dependencies {
   es090 'org.elasticsearch:elasticsearch:0.90.13@zip'
 }

+// Issue tracked in https://github.com/elastic/elasticsearch/issues/40904
+if (project.inFipsJvm) {
+  integTest.enabled = false
+}
+
 if (Os.isFamily(Os.FAMILY_WINDOWS)) {
   logger.warn("Disabling reindex-from-old tests because we can't get the pid file on windows")
   integTest.runner {
@@ -1 +0,0 @@
-dda059f4908e1b548b7ba68d81a3b05897f27cb0
@@ -0,0 +1 @@
+c27c9d6f15435dc2b6947112027b418b0eef32b9
@@ -1 +0,0 @@
-dda059f4908e1b548b7ba68d81a3b05897f27cb0
@@ -0,0 +1 @@
+c27c9d6f15435dc2b6947112027b418b0eef32b9
@@ -1 +0,0 @@
-dda059f4908e1b548b7ba68d81a3b05897f27cb0
@@ -0,0 +1 @@
+c27c9d6f15435dc2b6947112027b418b0eef32b9
@@ -1 +0,0 @@
-dda059f4908e1b548b7ba68d81a3b05897f27cb0
@@ -0,0 +1 @@
+c27c9d6f15435dc2b6947112027b418b0eef32b9
@@ -1 +0,0 @@
-dda059f4908e1b548b7ba68d81a3b05897f27cb0
@@ -0,0 +1 @@
+c27c9d6f15435dc2b6947112027b418b0eef32b9
@@ -105,3 +105,15 @@ task bwcTestSnapshots {
 check.dependsOn(bwcTestSnapshots)

+configurations {
+  testArtifacts.extendsFrom testRuntime
+}
+
+task testJar(type: Jar) {
+  appendix 'test'
+  from sourceSets.test.output
+}
+
+artifacts {
+  testArtifacts testJar
+}
@@ -1,4 +1,4 @@
 /*
  * Licensed to Elasticsearch under one or more contributor
  * license agreements. See the NOTICE file distributed with
  * this work for additional information regarding copyright
@@ -23,8 +23,6 @@ apply plugin: 'elasticsearch.rest-test'
 apply plugin: 'elasticsearch.standalone-test'

 integTestCluster {
-  autoSetInitialMasterNodes = false
-  autoSetHostsProvider = false
   /**
    * Provide a custom log4j configuration where layout is an old style pattern and confirm that Elasticsearch
    * can successfully startup.
@@ -42,6 +42,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Predicate;

 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength;
@@ -51,6 +52,7 @@ import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAlloc
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.isIn;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
@@ -206,7 +208,6 @@ public class RecoveryIT extends AbstractRollingTestCase {
         return null;
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34950")
     public void testRelocationWithConcurrentIndexing() throws Exception {
         final String index = "relocation_with_concurrent_indexing";
         switch (CLUSTER_TYPE) {
@@ -240,6 +241,15 @@ public class RecoveryIT extends AbstractRollingTestCase {
                 ensureNoInitializingShards(); // wait for all other shard activity to finish
                 updateIndexSettings(index, Settings.builder().put("index.routing.allocation.include._id", newNode));
                 asyncIndexDocs(index, 10, 50).get();
+                // ensure the relocation from old node to new node has occurred; otherwise ensureGreen can
+                // return true even though shards haven't moved to the new node yet (allocation was throttled).
+                assertBusy(() -> {
+                    Map<String, ?> state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state")));
+                    String xpath = "routing_table.indices." + index + ".shards.0.node";
+                    @SuppressWarnings("unchecked") List<String> assignedNodes = (List<String>) XContentMapValues.extractValue(xpath, state);
+                    assertNotNull(state.toString(), assignedNodes);
+                    assertThat(state.toString(), newNode, isIn(assignedNodes));
+                }, 60, TimeUnit.SECONDS);
                 ensureGreen(index);
                 client().performRequest(new Request("POST", index + "/_refresh"));
                 assertCount(index, "_only_nodes:" + newNode, 60);
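The new assertBusy block above pulls the assigned node out of the raw cluster-state response with a dotted path. For readers unfamiliar with that helper, here is a toy re-implementation of the traversal idea only; the real XContentMapValues.extractValue handles more cases (lists of maps, wildcards) than this sketch:

import java.util.List;
import java.util.Map;

// Toy sketch of dotted-path extraction over a nested Map/List structure, in the spirit of
// XContentMapValues.extractValue("routing_table.indices.<index>.shards.0.node", state).
final class PathExtract {
    static Object extract(String path, Map<String, ?> source) {
        Object current = source;
        for (String key : path.split("\\.")) {
            if (current instanceof Map) {
                current = ((Map<?, ?>) current).get(key);
            } else if (current instanceof List && key.chars().allMatch(Character::isDigit)) {
                current = ((List<?>) current).get(Integer.parseInt(key)); // numeric segment indexes a list
            } else {
                return null; // path does not resolve
            }
        }
        return current;
    }
}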
@@ -439,4 +439,33 @@ public abstract class ArchiveTestCase extends PackagingTestCase {
         assertThat(result.stdout, containsString("Master node was successfully bootstrapped"));
     }

+    public void test94ElasticsearchNodeExecuteCliNotEsHomeWorkDir() throws Exception {
+        assumeThat(installation, is(notNullValue()));
+
+        final Installation.Executables bin = installation.executables();
+        final Shell sh = newShell();
+        // Run the cli tools from the tmp dir
+        sh.setWorkingDirectory(getTempDir());
+
+        Platforms.PlatformAction action = () -> {
+            Result result = sh.run(bin.elasticsearchCertutil + " -h");
+            assertThat(result.stdout,
+                containsString("Simplifies certificate creation for use with the Elastic Stack"));
+            result = sh.run(bin.elasticsearchSyskeygen + " -h");
+            assertThat(result.stdout,
+                containsString("system key tool"));
+            result = sh.run(bin.elasticsearchSetupPasswords + " -h");
+            assertThat(result.stdout,
+                containsString("Sets the passwords for reserved users"));
+            result = sh.run(bin.elasticsearchUsers + " -h");
+            assertThat(result.stdout,
+                containsString("Manages elasticsearch file users"));
+        };
+
+        if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) {
+            Platforms.onLinux(action);
+            Platforms.onWindows(action);
+        }
+    }
 }
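The test above exercises each x-pack CLI with only `-h`, from a working directory other than the installation, so a failure points at path-resolution bugs rather than tool behavior. A minimal stand-alone sketch of that shape, using plain ProcessBuilder instead of the packaging-test Shell; the command passed in is a placeholder, not an Elasticsearch binary:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Toy sketch: run a CLI with -h from a neutral temp directory and capture its help text.
final class CliHelpCheck {
    static String runFromTempDir(String... command) throws IOException, InterruptedException {
        Path tempDir = Files.createTempDirectory("cli-help");
        Process process = new ProcessBuilder(command)
            .directory(tempDir.toFile())   // key point: working dir is not the install dir
            .redirectErrorStream(true)     // fold stderr into stdout, like the test's Result
            .start();
        String output = new String(process.getInputStream().readAllBytes());
        process.waitFor();
        return output;
    }
}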
@@ -104,6 +104,9 @@ public class Installation {
         public final Path elasticsearchCertutil = platformExecutable("elasticsearch-certutil");
         public final Path elasticsearchShard = platformExecutable("elasticsearch-shard");
         public final Path elasticsearchNode = platformExecutable("elasticsearch-node");
+        public final Path elasticsearchSetupPasswords = platformExecutable("elasticsearch-setup-passwords");
+        public final Path elasticsearchSyskeygen = platformExecutable("elasticsearch-syskeygen");
+        public final Path elasticsearchUsers = platformExecutable("elasticsearch-users");

         private Path platformExecutable(String name) {
             final String platformExecutableName = Platforms.WINDOWS
@@ -57,7 +57,7 @@ public class Build {
             return displayName;
         }

-        public static Flavor fromDisplayName(final String displayName) {
+        public static Flavor fromDisplayName(final String displayName, final boolean strict) {
             switch (displayName) {
                 case "default":
                     return Flavor.DEFAULT;
@@ -66,7 +66,12 @@ public class Build {
                 case "unknown":
                     return Flavor.UNKNOWN;
                 default:
-                    throw new IllegalStateException("unexpected distribution flavor [" + displayName + "]; your distribution is broken");
+                    if (strict) {
+                        final String message = "unexpected distribution flavor [" + displayName + "]; your distribution is broken";
+                        throw new IllegalStateException(message);
+                    } else {
+                        return Flavor.UNKNOWN;
+                    }
             }
         }
@@ -91,7 +96,7 @@ public class Build {
             this.displayName = displayName;
         }

-        public static Type fromDisplayName(final String displayName) {
+        public static Type fromDisplayName(final String displayName, final boolean strict) {
             switch (displayName) {
                 case "deb":
                     return Type.DEB;
@@ -106,9 +111,14 @@ public class Build {
                 case "unknown":
                     return Type.UNKNOWN;
                 default:
-                    throw new IllegalStateException("unexpected distribution type [" + displayName + "]; your distribution is broken");
+                    if (strict) {
+                        throw new IllegalStateException("unexpected distribution type [" + displayName + "]; your distribution is broken");
+                    } else {
+                        return Type.UNKNOWN;
+                    }
             }
         }
     }

     static {
@@ -119,8 +129,9 @@ public class Build {
         final boolean isSnapshot;
         final String version;

-        flavor = Flavor.fromDisplayName(System.getProperty("es.distribution.flavor", "unknown"));
-        type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown"));
+        // these are parsed at startup, and we require that we are able to recognize the values passed in by the startup scripts
+        flavor = Flavor.fromDisplayName(System.getProperty("es.distribution.flavor", "unknown"), true);
+        type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown"), true);

         final String esPrefix = "elasticsearch-" + Version.CURRENT;
         final URL url = getElasticsearchCodeSourceLocation();
@@ -214,12 +225,14 @@ public class Build {
         final Flavor flavor;
         final Type type;
         if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
-            flavor = Flavor.fromDisplayName(in.readString());
+            // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know
+            flavor = Flavor.fromDisplayName(in.readString(), false);
         } else {
             flavor = Flavor.OSS;
         }
         if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
-            type = Type.fromDisplayName(in.readString());
+            // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know
+            type = Type.fromDisplayName(in.readString(), false);
         } else {
             type = Type.UNKNOWN;
         }
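The two fromDisplayName overloads split one behavior into two policies: strict for values our own startup scripts inject, lenient for values read off the wire from nodes of other versions. A self-contained sketch of the pattern with a toy enum, not the real Build class:

// Toy sketch of the strict/lenient parse split introduced above.
enum ToyFlavor {
    DEFAULT, OSS, UNKNOWN;

    static ToyFlavor fromDisplayName(String displayName, boolean strict) {
        switch (displayName) {
            case "default": return DEFAULT;
            case "oss":     return OSS;
            case "unknown": return UNKNOWN;
            default:
                if (strict) {
                    // startup-script values must always be recognizable
                    throw new IllegalStateException("unexpected distribution flavor [" + displayName + "]");
                }
                return UNKNOWN; // wire values from newer/older versions degrade gracefully
        }
    }
}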
@@ -29,6 +29,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.transport.TransportException;

 import java.io.IOException;
 import java.io.PrintWriter;
@@ -193,6 +194,14 @@ public final class ExceptionsHelper {
         return null;
     }

+    public static boolean isTransportStoppedForAction(final Throwable t, final String action) {
+        final TransportException maybeTransport =
+            (TransportException) ExceptionsHelper.unwrap(t, TransportException.class);
+        return maybeTransport != null
+            && (maybeTransport.getMessage().equals("TransportService is closed stopped can't send request")
+                || maybeTransport.getMessage().equals("transport stopped, action: " + action));
+    }
+
     /**
      * Throws the specified exception. If null if specified then <code>true</code> is returned.
      */
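The helper packages up the two message shapes a stopped TransportService can produce; ReplicationOperation (further down in this commit) becomes its first caller. A hedged usage sketch, with the wrapped exception constructed artificially for illustration:

// Illustration only: build a failure like the ones the new helper recognizes.
Throwable failure = new RuntimeException(
    new TransportException("transport stopped, action: internal:cluster/shard/failure"));

// unwrap walks the cause chain, so the nested TransportException is found and matched.
boolean stopping = ExceptionsHelper.isTransportStoppedForAction(failure, "internal:cluster/shard/failure");
assert stopping; // treat the failed send as benign: the local node is shutting down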
@ -222,13 +222,9 @@ public interface DocWriteRequest<T> extends IndicesRequest {
byte type = in.readByte(); byte type = in.readByte();
DocWriteRequest<?> docWriteRequest; DocWriteRequest<?> docWriteRequest;
if (type == 0) { if (type == 0) {
IndexRequest indexRequest = new IndexRequest(); docWriteRequest = new IndexRequest(in);
indexRequest.readFrom(in);
docWriteRequest = indexRequest;
} else if (type == 1) { } else if (type == 1) {
DeleteRequest deleteRequest = new DeleteRequest(); docWriteRequest = new DeleteRequest(in);
deleteRequest.readFrom(in);
docWriteRequest = deleteRequest;
} else if (type == 2) { } else if (type == 2) {
UpdateRequest updateRequest = new UpdateRequest(); UpdateRequest updateRequest = new UpdateRequest();
updateRequest.readFrom(in); updateRequest.readFrom(in);
@@ -136,9 +136,11 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA
     public static class ShardRequest extends ReplicationRequest<ShardRequest> {

-        private ClusterBlock clusterBlock;
+        private final ClusterBlock clusterBlock;

-        ShardRequest(){
+        ShardRequest(StreamInput in) throws IOException {
+            super(in);
+            clusterBlock = new ClusterBlock(in);
         }

         public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final TaskId parentTaskId) {
@@ -153,9 +155,8 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA
         }

         @Override
-        public void readFrom(final StreamInput in) throws IOException {
-            super.readFrom(in);
-            clusterBlock = new ClusterBlock(in);
+        public void readFrom(final StreamInput in) {
+            throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
         }

         @Override
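This hunk is the template for the rest of the commit: deserialization moves from a mutable no-arg constructor plus readFrom into a constructor that takes the StreamInput, fields become final, and readFrom stays behind only to throw if some path still uses Streamable. A toy illustration of the pattern with plain Java I/O types, not the Elasticsearch stream classes:

import java.io.DataInputStream;
import java.io.IOException;

// Toy illustration of the Streamable -> Writeable migration pattern used throughout this commit.
final class ToyMessage {
    private final String payload; // final is now possible: assigned exactly once, in the constructor

    // the deserializing constructor replaces the no-arg constructor + readFrom pair
    ToyMessage(DataInputStream in) throws IOException {
        payload = in.readUTF();
    }

    // legacy entry point kept only as a tripwire for unconverted callers
    public void readFrom(DataInputStream in) {
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }
}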
@ -52,6 +52,12 @@ public class FlushRequest extends BroadcastRequest<FlushRequest> {
super(indices); super(indices);
} }
public FlushRequest(StreamInput in) throws IOException {
super(in);
force = in.readBoolean();
waitIfOngoing = in.readBoolean();
}
/** /**
* Returns {@code true} iff a flush should block * Returns {@code true} iff a flush should block
* if a another flush operation is already running. Otherwise {@code false} * if a another flush operation is already running. Otherwise {@code false}
@ -103,9 +109,7 @@ public class FlushRequest extends BroadcastRequest<FlushRequest> {
@Override @Override
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) throws IOException {
super.readFrom(in); throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
force = in.readBoolean();
waitIfOngoing = in.readBoolean();
} }
@Override @Override
@@ -29,7 +29,7 @@ import java.io.IOException;

 public class ShardFlushRequest extends ReplicationRequest<ShardFlushRequest> {

-    private FlushRequest request = new FlushRequest();
+    private final FlushRequest request;

     public ShardFlushRequest(FlushRequest request, ShardId shardId) {
         super(shardId);
@@ -37,7 +37,9 @@ public class ShardFlushRequest extends ReplicationRequest<ShardFlushRequest> {
         this.waitForActiveShards = ActiveShardCount.NONE; // don't wait for any active shards before proceeding, by default
     }

-    public ShardFlushRequest() {
+    public ShardFlushRequest(StreamInput in) throws IOException {
+        super(in);
+        request = new FlushRequest(in);
     }

     FlushRequest getRequest() {
@@ -46,8 +48,7 @@ public class ShardFlushRequest extends ReplicationRequest<ShardFlushRequest> {
     @Override
     public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-        request.readFrom(in);
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
     }

     @Override
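Worth noting in this hunk: the wrapper's constructor threads the same stream through super(in) and then into new FlushRequest(in), so the byte order of the old readFrom chain is preserved exactly. A toy sketch of that composition, again with plain Java I/O types:

import java.io.DataInputStream;
import java.io.IOException;

// Toy sketch: a wrapper request deserializing its nested request from the same stream,
// mirroring ShardFlushRequest(StreamInput) -> new FlushRequest(in) above.
final class ToyInner {
    final boolean force;
    ToyInner(DataInputStream in) throws IOException {
        force = in.readBoolean(); // nested payload comes after the wrapper's own fields
    }
}

final class ToyWrapper {
    final ToyInner request;
    ToyWrapper(DataInputStream in) throws IOException {
        in.readUTF();                // stand-in for super(in): wrapper-level fields first
        request = new ToyInner(in);  // then the nested request, in the same stream order
    }
}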
@@ -55,7 +55,7 @@ public class TransportShardFlushAction
             IndexShard primary) {
         primary.flush(shardRequest.getRequest());
         logger.trace("{} flush request executed on primary", primary.shardId());
-        return new PrimaryResult<ShardFlushRequest, ReplicationResponse>(shardRequest, new ReplicationResponse());
+        return new PrimaryResult<>(shardRequest, new ReplicationResponse());
     }

     @Override
@@ -20,6 +20,9 @@
 package org.elasticsearch.action.admin.indices.refresh;

 import org.elasticsearch.action.support.broadcast.BroadcastRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+
+import java.io.IOException;

 /**
  * A refresh request making all operations performed since the last refresh available for search. The (near) real-time
@@ -35,4 +38,8 @@ public class RefreshRequest extends BroadcastRequest<RefreshRequest> {
     public RefreshRequest(String... indices) {
         super(indices);
     }
+
+    public RefreshRequest(StreamInput in) throws IOException {
+        super(in);
+    }
 }
@ -33,7 +33,14 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
private BulkItemRequest[] items; private BulkItemRequest[] items;
public BulkShardRequest() { public BulkShardRequest(StreamInput in) throws IOException {
super(in);
items = new BulkItemRequest[in.readVInt()];
for (int i = 0; i < items.length; i++) {
if (in.readBoolean()) {
items[i] = BulkItemRequest.readBulkItem(in);
}
}
} }
public BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) { public BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
@ -60,7 +67,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
indices.add(item.index()); indices.add(item.index());
} }
} }
return indices.toArray(new String[indices.size()]); return indices.toArray(new String[0]);
} }
@Override @Override
@ -78,14 +85,8 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
} }
@Override @Override
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) {
super.readFrom(in); throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
items = new BulkItemRequest[in.readVInt()];
for (int i = 0; i < items.length; i++) {
if (in.readBoolean()) {
items[i] = BulkItemRequest.readBulkItem(in);
}
}
} }
@Override @Override
@@ -28,11 +28,10 @@ import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.action.support.WriteResponse;
 import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.transport.TransportService;

-import java.util.function.Supplier;
-
 /** use transport bulk action directly */
 @Deprecated
 public abstract class TransportSingleItemBulkWriteAction<
@@ -43,8 +42,8 @@ public abstract class TransportSingleItemBulkWriteAction<
     private final TransportBulkAction bulkAction;

     protected TransportSingleItemBulkWriteAction(String actionName, TransportService transportService, ActionFilters actionFilters,
-                                                 Supplier<Request> request, TransportBulkAction bulkAction) {
-        super(actionName, transportService, actionFilters, request);
+                                                 Writeable.Reader<Request> requestReader, TransportBulkAction bulkAction) {
+        super(actionName, transportService, actionFilters, requestReader);
         this.bulkAction = bulkAction;
     }
@@ -53,6 +53,8 @@ import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
 public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
         implements DocWriteRequest<DeleteRequest>, CompositeIndicesRequest {

+    private static final ShardId NO_SHARD_ID = null;
+
     // Set to null initially so we can know to override in bulk requests that have a default type.
     private String type;
     private String id;
@@ -63,7 +65,27 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
     private long ifSeqNo = UNASSIGNED_SEQ_NO;
     private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;

+    public DeleteRequest(StreamInput in) throws IOException {
+        super(in);
+        type = in.readString();
+        id = in.readString();
+        routing = in.readOptionalString();
+        if (in.getVersion().before(Version.V_7_0_0)) {
+            in.readOptionalString(); // _parent
+        }
+        version = in.readLong();
+        versionType = VersionType.fromValue(in.readByte());
+        if (in.getVersion().onOrAfter(Version.V_6_6_0)) {
+            ifSeqNo = in.readZLong();
+            ifPrimaryTerm = in.readVLong();
+        } else {
+            ifSeqNo = UNASSIGNED_SEQ_NO;
+            ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
+        }
+    }
+
     public DeleteRequest() {
+        super(NO_SHARD_ID);
     }

     /**
@@ -71,6 +93,7 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
      * must be set.
      */
     public DeleteRequest(String index) {
+        super(NO_SHARD_ID);
         this.index = index;
     }

@@ -85,6 +108,7 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
      */
     @Deprecated
     public DeleteRequest(String index, String type, String id) {
+        super(NO_SHARD_ID);
         this.index = index;
         this.type = type;
         this.id = id;
@@ -97,6 +121,7 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
      * @param id The id of the document
      */
     public DeleteRequest(String index, String id) {
+        super(NO_SHARD_ID);
         this.index = index;
         this.id = id;
     }
@@ -274,23 +299,8 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
     }

     @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-        type = in.readString();
-        id = in.readString();
-        routing = in.readOptionalString();
-        if (in.getVersion().before(Version.V_7_0_0)) {
-            in.readOptionalString(); // _parent
-        }
-        version = in.readLong();
-        versionType = VersionType.fromValue(in.readByte());
-        if (in.getVersion().onOrAfter(Version.V_6_6_0)) {
-            ifSeqNo = in.readZLong();
-            ifPrimaryTerm = in.readVLong();
-        } else {
-            ifSeqNo = UNASSIGNED_SEQ_NO;
-            ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
-        }
+    public void readFrom(StreamInput in) {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
     }

     @Override
@@ -321,14 +331,4 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
     public String toString() {
         return "delete {[" + index + "][" + type() + "][" + id + "]}";
     }
-
-    /**
-     * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
-     * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
-     * use because the DeleteRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
-     */
-    @Override
-    public DeleteRequest setShardId(ShardId shardId) {
-        throw new UnsupportedOperationException("shard id should never be set on DeleteRequest");
-    }
 }
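DeleteRequest's new constructor also shows the version-gating idiom: bytes that older senders still put on the wire (the removed _parent) are read and discarded, and fields newer than the sender's version fall back to defaults. A self-contained sketch of the idiom; the version constants and stream layout here are invented for illustration:

import java.io.DataInputStream;
import java.io.IOException;

// Toy sketch of version-gated wire reading, mirroring DeleteRequest(StreamInput) above.
final class ToyVersionedRead {
    static final int V_6_6_0 = 6_06_00, V_7_0_0 = 7_00_00; // illustrative version ids

    final String id;
    final long ifSeqNo;

    ToyVersionedRead(DataInputStream in, int senderVersion) throws IOException {
        id = in.readUTF();
        if (senderVersion < V_7_0_0) {
            in.readUTF(); // _parent: still sent by 6.x nodes, read and discarded here
        }
        // a field added in 6.6.0 is only on the wire when the sender is new enough
        ifSeqNo = senderVersion >= V_6_6_0 ? in.readLong() : -2L /* stand-in for UNASSIGNED_SEQ_NO */;
    }
}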
@ -83,6 +83,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
*/ */
static final int MAX_SOURCE_LENGTH_IN_TOSTRING = 2048; static final int MAX_SOURCE_LENGTH_IN_TOSTRING = 2048;
private static final ShardId NO_SHARD_ID = null;
// Set to null initially so we can know to override in bulk requests that have a default type. // Set to null initially so we can know to override in bulk requests that have a default type.
private String type; private String type;
private String id; private String id;
@ -112,8 +114,41 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
private long ifSeqNo = UNASSIGNED_SEQ_NO; private long ifSeqNo = UNASSIGNED_SEQ_NO;
private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
public IndexRequest(StreamInput in) throws IOException {
super(in);
type = in.readOptionalString();
id = in.readOptionalString();
routing = in.readOptionalString();
if (in.getVersion().before(Version.V_7_0_0)) {
in.readOptionalString(); // _parent
}
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
in.readOptionalString(); // timestamp
in.readOptionalTimeValue(); // ttl
}
source = in.readBytesReference();
opType = OpType.fromId(in.readByte());
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
pipeline = in.readOptionalString();
isRetry = in.readBoolean();
autoGeneratedTimestamp = in.readLong();
if (in.readBoolean()) {
contentType = in.readEnum(XContentType.class);
} else {
contentType = null;
}
if (in.getVersion().onOrAfter(Version.V_6_6_0)) {
ifSeqNo = in.readZLong();
ifPrimaryTerm = in.readVLong();
} else {
ifSeqNo = UNASSIGNED_SEQ_NO;
ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
}
}
public IndexRequest() { public IndexRequest() {
super(NO_SHARD_ID);
} }
/** /**
@ -121,6 +156,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
* {@link #source(byte[], XContentType)} must be set. * {@link #source(byte[], XContentType)} must be set.
*/ */
public IndexRequest(String index) { public IndexRequest(String index) {
super(NO_SHARD_ID);
this.index = index; this.index = index;
} }
@ -131,6 +167,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
*/ */
@Deprecated @Deprecated
public IndexRequest(String index, String type) { public IndexRequest(String index, String type) {
super(NO_SHARD_ID);
this.index = index; this.index = index;
this.type = type; this.type = type;
} }
@ -146,6 +183,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
*/ */
@Deprecated @Deprecated
public IndexRequest(String index, String type, String id) { public IndexRequest(String index, String type, String id) {
super(NO_SHARD_ID);
this.index = index; this.index = index;
this.type = type; this.type = type;
this.id = id; this.id = id;
@ -593,37 +631,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
} }
@Override @Override
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) {
super.readFrom(in); throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
type = in.readOptionalString();
id = in.readOptionalString();
routing = in.readOptionalString();
if (in.getVersion().before(Version.V_7_0_0)) {
in.readOptionalString(); // _parent
}
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
in.readOptionalString(); // timestamp
in.readOptionalTimeValue(); // ttl
}
source = in.readBytesReference();
opType = OpType.fromId(in.readByte());
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
pipeline = in.readOptionalString();
isRetry = in.readBoolean();
autoGeneratedTimestamp = in.readLong();
if (in.readBoolean()) {
contentType = in.readEnum(XContentType.class);
} else {
contentType = null;
}
if (in.getVersion().onOrAfter(Version.V_6_6_0)) {
ifSeqNo = in.readZLong();
ifPrimaryTerm = in.readVLong();
} else {
ifSeqNo = UNASSIGNED_SEQ_NO;
ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
}
} }
@Override @Override
@ -704,15 +713,4 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
public long getAutoGeneratedTimestamp() { public long getAutoGeneratedTimestamp() {
return autoGeneratedTimestamp; return autoGeneratedTimestamp;
} }
/**
* Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
* do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
* use because the IndexRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
*/
@Override
public IndexRequest setShardId(ShardId shardId) {
throw new UnsupportedOperationException("shard id should never be set on IndexRequest");
}
} }
@@ -135,8 +135,12 @@ public class MainResponse extends ActionResponse implements ToXContentObject {
             final String buildType = (String) value.get("build_type");
             response.build =
                 new Build(
-                    buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor),
-                    buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType),
+                    /*
+                     * Be lenient when reading on the wire, the enumeration values from other versions might be different than what
+                     * we know.
+                     */
+                    buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor, false),
+                    buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType, false),
                     (String) value.get("build_hash"),
                     (String) value.get("build_date"),
                     (boolean) value.get("build_snapshot"),
@@ -36,12 +36,32 @@ import java.util.Objects;
  */
 public final class ResyncReplicationRequest extends ReplicatedWriteRequest<ResyncReplicationRequest> {

-    private long trimAboveSeqNo;
-    private Translog.Operation[] operations;
-    private long maxSeenAutoIdTimestampOnPrimary;
+    private final long trimAboveSeqNo;
+    private final Translog.Operation[] operations;
+    private final long maxSeenAutoIdTimestampOnPrimary;

-    ResyncReplicationRequest() {
-        super();
+    ResyncReplicationRequest(StreamInput in) throws IOException {
+        super(in);
+        assert Version.CURRENT.major <= 7;
+        if (in.getVersion().equals(Version.V_6_0_0)) {
+            /*
+             * Resync replication request serialization was broken in 6.0.0 due to the elements of the stream not being prefixed with a
+             * byte indicating the type of the operation.
+             */
+            // TODO: remove this check in 8.0.0 which provides no BWC guarantees with 6.x.
+            throw new IllegalStateException("resync replication request serialization is broken in 6.0.0");
+        }
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
+            trimAboveSeqNo = in.readZLong();
+        } else {
+            trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
+        }
+        if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
+            maxSeenAutoIdTimestampOnPrimary = in.readZLong();
+        } else {
+            maxSeenAutoIdTimestampOnPrimary = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;
+        }
+        operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new);
     }

     public ResyncReplicationRequest(final ShardId shardId, final long trimAboveSeqNo, final long maxSeenAutoIdTimestampOnPrimary,
@@ -65,28 +85,8 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest<Resyn
     }

     @Override
-    public void readFrom(final StreamInput in) throws IOException {
-        assert Version.CURRENT.major <= 7;
-        if (in.getVersion().equals(Version.V_6_0_0)) {
-            /*
-             * Resync replication request serialization was broken in 6.0.0 due to the elements of the stream not being prefixed with a
-             * byte indicating the type of the operation.
-             */
-            // TODO: remove this check in 8.0.0 which provides no BWC guarantees with 6.x.
-            throw new IllegalStateException("resync replication request serialization is broken in 6.0.0");
-        }
-        super.readFrom(in);
-        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
-            trimAboveSeqNo = in.readZLong();
-        } else {
-            trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
-        }
-        if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
-            maxSeenAutoIdTimestampOnPrimary = in.readZLong();
-        } else {
-            maxSeenAutoIdTimestampOnPrimary = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;
-        }
-        operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new);
+    public void readFrom(final StreamInput in) {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
     }

     @Override
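The last line of the new constructor, in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new), pairs an element reader with an array allocator so the whole array can land in a final field in one expression. A toy sketch of that reader-plus-allocator shape, with invented names:

import java.io.DataInputStream;
import java.io.IOException;
import java.util.function.IntFunction;

// Toy sketch of the reader + array-allocator idiom behind StreamInput#readArray.
interface ToyElementReader<T> {
    T read(DataInputStream in) throws IOException;
}

final class ToyStreams {
    static <T> T[] readArray(DataInputStream in, ToyElementReader<T> reader, IntFunction<T[]> arraySupplier)
            throws IOException {
        T[] values = arraySupplier.apply(in.readInt()); // length prefix first, then the elements
        for (int i = 0; i < values.length; i++) {
            values[i] = reader.read(in);
        }
        return values;
    }
}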
@@ -19,7 +19,6 @@
 package org.elasticsearch.action.resync;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.ReplicationOperation;
@@ -29,7 +28,6 @@ import org.elasticsearch.action.support.replication.TransportWriteAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
@@ -74,19 +72,6 @@ public class TransportResyncReplicationAction extends TransportWriteAction<Resyn
         return new ResyncActionReplicasProxy(primaryTerm);
     }

-    @Override
-    protected void sendReplicaRequest(
-        final ConcreteReplicaRequest<ResyncReplicationRequest> replicaRequest,
-        final DiscoveryNode node,
-        final ActionListener<ReplicationOperation.ReplicaResponse> listener) {
-        if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
-            super.sendReplicaRequest(replicaRequest, node, listener);
-        } else {
-            final long pre60NodeCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT;
-            listener.onResponse(new ReplicaResponse(pre60NodeCheckpoint, pre60NodeCheckpoint));
-        }
-    }
-
     @Override
     protected ClusterBlockLevel globalBlockLevel() {
         // resync should never be blocked because it's an internal action
@ -36,6 +36,12 @@ public class BroadcastRequest<Request extends BroadcastRequest<Request>> extends
public BroadcastRequest() { public BroadcastRequest() {
} }
public BroadcastRequest(StreamInput in) throws IOException {
super(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
}
protected BroadcastRequest(String[] indices) { protected BroadcastRequest(String[] indices) {
this.indices = indices; this.indices = indices;
} }
@@ -19,8 +19,11 @@

 package org.elasticsearch.action.support.replication;

+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.index.shard.ShardId;

+import java.io.IOException;
+
 /**
  * A replication request that has no more information than ReplicationRequest.
  * Unfortunately ReplicationRequest can't be declared as a type parameter
@@ -28,9 +31,6 @@ import org.elasticsearch.index.shard.ShardId;
  * instead.
  */
 public class BasicReplicationRequest extends ReplicationRequest<BasicReplicationRequest> {
-    public BasicReplicationRequest() {
-    }
-
     /**
      * Creates a new request with resolved shard id
      */
@@ -38,6 +38,10 @@ public class BasicReplicationRequest extends ReplicationRequest<BasicReplication
         super(shardId);
     }

+    public BasicReplicationRequest(StreamInput in) throws IOException {
+        super(in);
+    }
+
     @Override
     public String toString() {
         return "BasicReplicationRequest{" + shardId + "}";
@@ -23,6 +23,7 @@ import org.elasticsearch.action.bulk.BulkShardRequest;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.index.shard.ShardId;
@@ -39,10 +40,12 @@ public abstract class ReplicatedWriteRequest<R extends ReplicatedWriteRequest<R>
     /**
      * Constructor for deserialization.
      */
-    public ReplicatedWriteRequest() {
+    public ReplicatedWriteRequest(StreamInput in) throws IOException {
+        super(in);
+        refreshPolicy = RefreshPolicy.readFrom(in);
     }

-    public ReplicatedWriteRequest(ShardId shardId) {
+    public ReplicatedWriteRequest(@Nullable ShardId shardId) {
         super(shardId);
     }

@@ -59,9 +62,8 @@ public abstract class ReplicatedWriteRequest<R extends ReplicatedWriteRequest<R>
     }

     @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-        refreshPolicy = RefreshPolicy.readFrom(in);
+    public void readFrom(StreamInput in) {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
     }

     @Override
@@ -38,7 +38,6 @@ import org.elasticsearch.index.shard.ReplicationGroup;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.node.NodeClosedException;
 import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.transport.TransportException;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -205,10 +204,9 @@ public class ReplicationOperation<
         private void onNoLongerPrimary(Exception failure) {
             final Throwable cause = ExceptionsHelper.unwrapCause(failure);
-            final boolean nodeIsClosing = cause instanceof NodeClosedException
-                || (cause instanceof TransportException &&
-                    ("TransportService is closed stopped can't send request".equals(cause.getMessage())
-                        || "transport stopped, action: internal:cluster/shard/failure".equals(cause.getMessage())));
+            final boolean nodeIsClosing =
+                cause instanceof NodeClosedException
+                    || ExceptionsHelper.isTransportStoppedForAction(cause, "internal:cluster/shard/failure");
             final String message;
             if (nodeIsClosing) {
                 message = String.format(Locale.ROOT,
@ -54,9 +54,9 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
* shard id gets resolved by the transport action before performing request operation * shard id gets resolved by the transport action before performing request operation
* and at request creation time for shard-level bulk, refresh and flush requests. * and at request creation time for shard-level bulk, refresh and flush requests.
*/ */
protected ShardId shardId; protected final ShardId shardId;
protected TimeValue timeout = DEFAULT_TIMEOUT; protected TimeValue timeout;
protected String index; protected String index;
/** /**
@ -66,16 +66,26 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
private long routedBasedOnClusterVersion = 0; private long routedBasedOnClusterVersion = 0;
public ReplicationRequest() { public ReplicationRequest(StreamInput in) throws IOException {
super(in);
if (in.readBoolean()) {
shardId = ShardId.readShardId(in);
} else {
shardId = null;
}
waitForActiveShards = ActiveShardCount.readFrom(in);
timeout = in.readTimeValue();
index = in.readString();
routedBasedOnClusterVersion = in.readVLong();
} }
/** /**
* Creates a new request with resolved shard id * Creates a new request with resolved shard id
*/ */
public ReplicationRequest(ShardId shardId) { public ReplicationRequest(@Nullable ShardId shardId) {
this.index = shardId.getIndexName(); this.index = shardId == null ? null : shardId.getIndexName();
this.shardId = shardId; this.shardId = shardId;
this.timeout = DEFAULT_TIMEOUT;
} }
/** /**
@ -179,27 +189,13 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
@Override @Override
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) throws IOException {
super.readFrom(in); throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
if (in.readBoolean()) {
shardId = ShardId.readShardId(in);
} else {
shardId = null;
}
waitForActiveShards = ActiveShardCount.readFrom(in);
timeout = in.readTimeValue();
index = in.readString();
routedBasedOnClusterVersion = in.readVLong();
} }
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out); super.writeTo(out);
if (shardId != null) { out.writeOptionalWriteable(shardId);
out.writeBoolean(true);
shardId.writeTo(out);
} else {
out.writeBoolean(false);
}
waitForActiveShards.writeTo(out); waitForActiveShards.writeTo(out);
out.writeTimeValue(timeout); out.writeTimeValue(timeout);
out.writeString(index); out.writeString(index);
@ -211,16 +207,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
return new ReplicationTask(id, type, action, getDescription(), parentTaskId, headers); return new ReplicationTask(id, type, action, getDescription(), parentTaskId, headers);
} }
/**
* Sets the target shard id for the request. The shard id is set when a
* index/delete request is resolved by the transport action
*/
@SuppressWarnings("unchecked")
public Request setShardId(ShardId shardId) {
this.shardId = shardId;
return (Request) this;
}
@Override @Override
public abstract String toString(); // force a proper to string to ease debugging public abstract String toString(); // force a proper to string to ease debugging
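out.writeOptionalWriteable(shardId) collapses the hand-rolled presence-boolean-plus-payload dance on the write side, while the constructor still reads the boolean explicitly; both sides keep the same framing, which is what matters for wire compatibility. A toy sketch of that optional-value framing:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Toy sketch of optional-writeable framing: a presence flag followed by the payload.
final class ToyOptionalFraming {
    static void writeOptionalString(DataOutputStream out, String value) throws IOException {
        out.writeBoolean(value != null); // presence flag
        if (value != null) {
            out.writeUTF(value);
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null; // must mirror the write order exactly
    }
}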
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.support.replication;

 import com.carrotsearch.hppc.cursors.IntObjectCursor;
-
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
@@ -35,6 +34,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.util.concurrent.CountDown;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.tasks.Task;
@@ -44,7 +44,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.function.Supplier;

 /**
  * Base class for requests that should be executed on all shards of an index or several indices.
@@ -58,11 +57,11 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
     private final ClusterService clusterService;
     private final IndexNameExpressionResolver indexNameExpressionResolver;

-    public TransportBroadcastReplicationAction(String name, Supplier<Request> request, ClusterService clusterService,
+    public TransportBroadcastReplicationAction(String name, Writeable.Reader<Request> requestReader, ClusterService clusterService,
                                                TransportService transportService,
                                                ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                                TransportReplicationAction replicatedBroadcastShardAction) {
-        super(name, transportService, actionFilters, request);
+        super(name, transportService, actionFilters, requestReader);
         this.replicatedBroadcastShardAction = replicatedBroadcastShardAction;
         this.clusterService = clusterService;
         this.indexNameExpressionResolver = indexNameExpressionResolver;
@ -83,9 +83,6 @@ import java.io.IOException;
import java.util.Map; import java.util.Map;
import java.util.Objects; import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
/** /**
* Base class for requests that should be executed on a primary copy followed by replica copies. * Base class for requests that should be executed on a primary copy followed by replica copies.
@ -120,10 +117,10 @@ public abstract class TransportReplicationAction<
ClusterService clusterService, IndicesService indicesService, ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ShardStateAction shardStateAction, ThreadPool threadPool, ShardStateAction shardStateAction,
ActionFilters actionFilters, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request, IndexNameExpressionResolver indexNameExpressionResolver, Writeable.Reader<Request> requestReader,
Supplier<ReplicaRequest> replicaRequest, String executor) { Writeable.Reader<ReplicaRequest> replicaRequestReader, String executor) {
this(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, this(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
indexNameExpressionResolver, request, replicaRequest, executor, false, false); indexNameExpressionResolver, requestReader, replicaRequestReader, executor, false, false);
} }
@ -131,8 +128,8 @@ public abstract class TransportReplicationAction<
ClusterService clusterService, IndicesService indicesService, ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ShardStateAction shardStateAction, ThreadPool threadPool, ShardStateAction shardStateAction,
ActionFilters actionFilters, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request, IndexNameExpressionResolver indexNameExpressionResolver, Writeable.Reader<Request> requestReader,
Supplier<ReplicaRequest> replicaRequest, String executor, Writeable.Reader<ReplicaRequest> replicaRequestReader, String executor,
boolean syncGlobalCheckpointAfterOperation, boolean forceExecutionOnPrimary) { boolean syncGlobalCheckpointAfterOperation, boolean forceExecutionOnPrimary) {
super(actionName, actionFilters, transportService.getTaskManager()); super(actionName, actionFilters, transportService.getTaskManager());
this.threadPool = threadPool; this.threadPool = threadPool;
@ -146,14 +143,14 @@ public abstract class TransportReplicationAction<
this.transportPrimaryAction = actionName + "[p]"; this.transportPrimaryAction = actionName + "[p]";
this.transportReplicaAction = actionName + "[r]"; this.transportReplicaAction = actionName + "[r]";
transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, this::handleOperationRequest); transportService.registerRequestHandler(actionName, ThreadPool.Names.SAME, requestReader, this::handleOperationRequest);
transportService.registerRequestHandler(transportPrimaryAction, transportService.registerRequestHandler(transportPrimaryAction, executor, forceExecutionOnPrimary, true,
() -> new ConcreteShardRequest<>(request), executor, forceExecutionOnPrimary, true, this::handlePrimaryRequest); in -> new ConcreteShardRequest<>(requestReader, in), this::handlePrimaryRequest);
// we must never reject on because of thread pool capacity on replicas // we must never reject on because of thread pool capacity on replicas
transportService.registerRequestHandler(transportReplicaAction, () -> new ConcreteReplicaRequest<>(replicaRequest), transportService.registerRequestHandler(transportReplicaAction, executor, true, true,
executor, true, true, this::handleReplicaRequest); in -> new ConcreteReplicaRequest<>(replicaRequestReader, in), this::handleReplicaRequest);
this.transportOptions = transportOptions(settings); this.transportOptions = transportOptions(settings);
@ -619,7 +616,7 @@ public abstract class TransportReplicationAction<
} }
} }
protected IndexShard getIndexShard(final ShardId shardId) { private IndexShard getIndexShard(final ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
return indexService.getShard(shardId.id()); return indexService.getShard(shardId.id());
} }
@ -1058,7 +1055,12 @@ public abstract class TransportReplicationAction<
} }
             final ConcreteReplicaRequest<ReplicaRequest> replicaRequest = new ConcreteReplicaRequest<>(
                 request, replica.allocationId().getId(), primaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes);
-            sendReplicaRequest(replicaRequest, node, listener);
+            final ActionListenerResponseHandler<ReplicaResponse> handler = new ActionListenerResponseHandler<>(listener, in -> {
+                ReplicaResponse replicaResponse = new ReplicaResponse();
+                replicaResponse.readFrom(in);
+                return replicaResponse;
+            });
+            transportService.sendRequest(node, transportReplicaAction, replicaRequest, transportOptions, handler);
         }

         @Override
@@ -1080,40 +1082,18 @@ public abstract class TransportReplicationAction<
         }
     }

-    /**
-     * Sends the specified replica request to the specified node.
-     *
-     * @param replicaRequest the replica request
-     * @param node the node to send the request to
-     * @param listener callback for handling the response or failure
-     */
-    protected void sendReplicaRequest(
-            final ConcreteReplicaRequest<ReplicaRequest> replicaRequest,
-            final DiscoveryNode node,
-            final ActionListener<ReplicationOperation.ReplicaResponse> listener) {
-        final ActionListenerResponseHandler<ReplicaResponse> handler = new ActionListenerResponseHandler<>(listener, in -> {
-            ReplicaResponse replicaResponse = new ReplicaResponse();
-            replicaResponse.readFrom(in);
-            return replicaResponse;
-        });
-        transportService.sendRequest(node, transportReplicaAction, replicaRequest, transportOptions, handler);
-    }
-
     /** a wrapper class to encapsulate a request when being sent to a specific allocation id **/
     public static class ConcreteShardRequest<R extends TransportRequest> extends TransportRequest {

         /** {@link AllocationId#getId()} of the shard this request is sent to **/
-        private String targetAllocationID;
-
-        private long primaryTerm;
-
-        private R request;
-
-        public ConcreteShardRequest(Supplier<R> requestSupplier) {
-            request = requestSupplier.get();
-            // null now, but will be populated by reading from the streams
-            targetAllocationID = null;
-            primaryTerm = UNASSIGNED_PRIMARY_TERM;
+        private final String targetAllocationID;
+        private final long primaryTerm;
+        private final R request;
+
+        public ConcreteShardRequest(Writeable.Reader<R> requestReader, StreamInput in) throws IOException {
+            targetAllocationID = in.readString();
+            primaryTerm = in.readVLong();
+            request = requestReader.read(in);
         }

         public ConcreteShardRequest(R request, String targetAllocationID, long primaryTerm) {
@@ -1149,10 +1129,8 @@ public abstract class TransportReplicationAction<
         }

         @Override
-        public void readFrom(StreamInput in) throws IOException {
-            targetAllocationID = in.readString();
-            primaryTerm = in.readVLong();
-            request.readFrom(in);
+        public void readFrom(StreamInput in) {
+            throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
         }

         @Override
@@ -1182,23 +1160,11 @@ public abstract class TransportReplicationAction<
     protected static final class ConcreteReplicaRequest<R extends TransportRequest> extends ConcreteShardRequest<R> {

-        private long globalCheckpoint;
-        private long maxSeqNoOfUpdatesOrDeletes;
-
-        public ConcreteReplicaRequest(final Supplier<R> requestSupplier) {
-            super(requestSupplier);
-        }
-
-        public ConcreteReplicaRequest(final R request, final String targetAllocationID, final long primaryTerm,
-                                      final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes) {
-            super(request, targetAllocationID, primaryTerm);
-            this.globalCheckpoint = globalCheckpoint;
-            this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes;
-        }
-
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
+        private final long globalCheckpoint;
+        private final long maxSeqNoOfUpdatesOrDeletes;
+
+        public ConcreteReplicaRequest(Writeable.Reader<R> requestReader, StreamInput in) throws IOException {
+            super(requestReader, in);
             if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
                 globalCheckpoint = in.readZLong();
             } else {
@@ -1213,6 +1179,18 @@ public abstract class TransportReplicationAction<
             }
         }

+        public ConcreteReplicaRequest(final R request, final String targetAllocationID, final long primaryTerm,
+                                      final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes) {
+            super(request, targetAllocationID, primaryTerm);
+            this.globalCheckpoint = globalCheckpoint;
+            this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes;
+        }
+
+        @Override
+        public void readFrom(StreamInput in) {
+            throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+        }
+
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
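
The pattern in this file recurs throughout the merge: request classes move off Streamable (no-arg construction followed by a mutating readFrom) onto Writeable (full deserialization inside a StreamInput-accepting constructor), which in turn lets the fields become final. A minimal standalone sketch of the migrated shape, assuming a hypothetical ExampleRequest with a single payload field that is not part of this change:

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.transport.TransportRequest;

import java.io.IOException;

public class ExampleRequest extends TransportRequest {

    // final is only possible because the Writeable constructor fully initializes the object
    private final String payload;

    public ExampleRequest(String payload) {
        this.payload = payload;
    }

    // Writeable-style deserialization: read every field from the stream up front
    public ExampleRequest(StreamInput in) throws IOException {
        payload = in.readString();
    }

    @Override
    public void readFrom(StreamInput in) {
        // Streamable is being phased out; fail loudly if anything still calls it
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(payload);
    }
}

The throwing readFrom mirrors the ConcreteShardRequest change above: any caller still on the Streamable path fails fast instead of silently half-reading the stream.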

View File

@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.mapper.MapperParsingException;
@@ -47,7 +48,6 @@ import org.elasticsearch.transport.TransportService;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Supplier;

 /**
  * Base class for transport actions that modify data in some shard like index, delete, and shardBulk.
@@ -62,8 +62,8 @@ public abstract class TransportWriteAction<
     protected TransportWriteAction(Settings settings, String actionName, TransportService transportService,
                                    ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool,
                                    ShardStateAction shardStateAction, ActionFilters actionFilters,
-                                   IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request,
-                                   Supplier<ReplicaRequest> replicaRequest, String executor, boolean forceExecutionOnPrimary) {
+                                   IndexNameExpressionResolver indexNameExpressionResolver, Writeable.Reader<Request> request,
+                                   Writeable.Reader<ReplicaRequest> replicaRequest, String executor, boolean forceExecutionOnPrimary) {
         super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
             indexNameExpressionResolver, request, replicaRequest, executor, true, forceExecutionOnPrimary);
     }
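
Writeable.Reader<R> is a functional interface whose single method produces a fully-formed R from a StreamInput, so constructor references such as IndexRequest::new satisfy the new parameters directly. A small sketch of what the signature swap buys; deserializeOld and deserializeNew are hypothetical helper names, not part of this change:

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.function.Supplier;

public final class ReaderMigrationSketch {

    // Old shape: a Supplier produced an empty, mutable object and readFrom populated it afterwards.
    static <R extends Streamable> R deserializeOld(Supplier<R> supplier, StreamInput in) throws IOException {
        R request = supplier.get();
        request.readFrom(in);
        return request;
    }

    // New shape: a Writeable.Reader builds the object directly from the stream,
    // which is what allows the request classes above to make their fields final.
    static <R> R deserializeNew(Writeable.Reader<R> reader, StreamInput in) throws IOException {
        return reader.read(in);
    }
}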

View File

@@ -845,8 +845,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
         retryOnConflict = in.readVInt();
         refreshPolicy = RefreshPolicy.readFrom(in);
         if (in.readBoolean()) {
-            doc = new IndexRequest();
-            doc.readFrom(in);
+            doc = new IndexRequest(in);
         }
         if (in.getVersion().before(Version.V_7_0_0)) {
             String[] fields = in.readOptionalStringArray();
@@ -856,8 +855,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
         }
         fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
         if (in.readBoolean()) {
-            upsertRequest = new IndexRequest();
-            upsertRequest.readFrom(in);
+            upsertRequest = new IndexRequest(in);
         }
         docAsUpsert = in.readBoolean();
         if (in.getVersion().before(Version.V_7_0_0)) {
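
The IndexRequest change above is the same Writeable migration combined with the manual boolean flag that UpdateRequest already uses for its optional sub-requests. A sketch of that optional-object wire pattern, with hypothetical helper names for illustration:

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

final class OptionalWireSketch {

    // Writer side: a boolean flag records whether the optional object follows on the wire.
    static void writeOptionalDoc(StreamOutput out, IndexRequest doc) throws IOException {
        out.writeBoolean(doc != null);
        if (doc != null) {
            doc.writeTo(out);
        }
    }

    // Reader side: consume the flag, then deserialize in one shot via the Writeable constructor.
    static IndexRequest readOptionalDoc(StreamInput in) throws IOException {
        return in.readBoolean() ? new IndexRequest(in) : null;
    }
}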

View File

@@ -293,13 +293,13 @@ public final class AnalysisRegistry implements Closeable {
         T factory = null;
         if (typeName == null) {
             if (currentSettings.get("tokenizer") != null) {
-                factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings, environment);
+                factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings);
             } else {
                 throw new IllegalArgumentException(component + " [" + name + "] " +
                     "must specify either an analyzer type, or a tokenizer");
             }
         } else if (typeName.equals("custom")) {
-            factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings, environment);
+            factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings);
         }
         if (factory != null) {
             factories.put(name, factory);
@@ -430,8 +430,10 @@ public final class AnalysisRegistry implements Closeable {
         Map<String, NamedAnalyzer> normalizers = new HashMap<>();
         Map<String, NamedAnalyzer> whitespaceNormalizers = new HashMap<>();
         for (Map.Entry<String, AnalyzerProvider<?>> entry : analyzerProviders.entrySet()) {
-            processAnalyzerFactory(indexSettings, entry.getKey(), entry.getValue(), analyzers,
-                tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
+            analyzers.merge(entry.getKey(), produceAnalyzer(entry.getKey(), entry.getValue(), tokenFilterFactoryFactories,
+                charFilterFactoryFactories, tokenizerFactoryFactories), (k, v) -> {
+                throw new IllegalStateException("already registered analyzer with name: " + entry.getKey());
+            });
         }
         for (Map.Entry<String, AnalyzerProvider<?>> entry : normalizerProviders.entrySet()) {
             processNormalizerFactory(entry.getKey(), entry.getValue(), normalizers, "keyword",
@@ -441,9 +443,9 @@ public final class AnalysisRegistry implements Closeable {
         }

         if (!analyzers.containsKey("default")) {
-            processAnalyzerFactory(indexSettings, "default", new StandardAnalyzerProvider(indexSettings, null,
-                "default", Settings.Builder.EMPTY_SETTINGS),
-                analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
+            NamedAnalyzer defaultAnalyzer = produceAnalyzer("default", new StandardAnalyzerProvider(indexSettings, null, "default",
+                Settings.Builder.EMPTY_SETTINGS), tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
+            analyzers.put("default", defaultAnalyzer);
         }
         if (!analyzers.containsKey("default_search")) {
             analyzers.put("default_search", analyzers.get("default"));
@@ -473,11 +475,9 @@ public final class AnalysisRegistry implements Closeable {
                 whitespaceNormalizers);
     }

-    private void processAnalyzerFactory(IndexSettings indexSettings,
-                                        String name,
-                                        AnalyzerProvider<?> analyzerFactory,
-                                        Map<String, NamedAnalyzer> analyzers, Map<String, TokenFilterFactory> tokenFilters,
-                                        Map<String, CharFilterFactory> charFilters, Map<String, TokenizerFactory> tokenizers) {
+    private static NamedAnalyzer produceAnalyzer(String name, AnalyzerProvider<?> analyzerFactory,
+                                                 Map<String, TokenFilterFactory> tokenFilters, Map<String, CharFilterFactory> charFilters,
+                                                 Map<String, TokenizerFactory> tokenizers) {
         /*
          * Lucene defaults positionIncrementGap to 0 in all analyzers but
          * Elasticsearch defaults them to 0 only before version 2.0
@@ -511,15 +511,7 @@ public final class AnalysisRegistry implements Closeable {
         } else {
             analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
         }
-        if (analyzers.containsKey(name)) {
-            throw new IllegalStateException("already registered analyzer with name: " + name);
-        }
-        analyzers.put(name, analyzer);
-        // TODO: remove alias support completely when we no longer support pre 5.0 indices
-        final String analyzerAliasKey = "index.analysis.analyzer." + analyzerFactory.name() + ".alias";
-        if (indexSettings.getSettings().get(analyzerAliasKey) != null) {
-            throw new IllegalArgumentException("setting [" + analyzerAliasKey + "] is not supported");
-        }
+        return analyzer;
     }

     private void processNormalizerFactory(
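
The merge call above folds the old containsKey/throw/put sequence into one step: Map.merge only invokes its remapping function when the key is already mapped, so throwing from that function turns it into a duplicate-registration check. A self-contained illustration, with plain strings standing in for the NamedAnalyzer instances:

import java.util.HashMap;
import java.util.Map;

public final class MergeDuplicateCheck {
    public static void main(String[] args) {
        Map<String, String> analyzers = new HashMap<>();

        // First registration: the key is absent, so the value is simply put.
        analyzers.merge("standard", "first", (existing, incoming) -> {
            throw new IllegalStateException("already registered analyzer with name: standard");
        });

        // Second registration of the same name: merge sees an existing mapping,
        // runs the remapping function, and the IllegalStateException propagates.
        analyzers.merge("standard", "second", (existing, incoming) -> {
            throw new IllegalStateException("already registered analyzer with name: standard");
        });
    }
}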
