Merge branch 'master' into ccr
* master:
  Remove the index thread pool (#29556)
  Remove extra copy in ScriptDocValues.Strings
  Fix full cluster restart test recovery (#29545)
  Fix binary doc values fetching in _search (#29567)
  Mutes failing MovAvgIT tests
  Fix the assertion message for an incorrect current version. (#29572)
  Fix the version ID for v5.6.10. (#29570)
  Painless Spec Documentation Clean Up (#29441)
  Add versions 5.6.10 and 6.2.5
  [TEST] test against scaled value instead of fixed epsilon in MovAvgIT
  Remove `flatSettings` support from request classes (#29560)
  MapperService to wrap a single DocumentMapper. (#29511)
  Fix dependency checks on libs when generating Eclipse configuration. (#29550)
  Add null_value support to geo_point type (#29451)
  Add documentation about the include_type_name option. (#29555)
  Enforce translog access via engine (#29542)
commit 4be1488324
@@ -572,7 +572,6 @@ public final class Request {

     static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException {
         Params parameters = Params.builder();
-        parameters.withFlatSettings(clusterUpdateSettingsRequest.flatSettings());
         parameters.withTimeout(clusterUpdateSettingsRequest.timeout());
         parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout());
         HttpEntity entity = createEntity(clusterUpdateSettingsRequest, REQUEST_BODY_CONTENT_TYPE);
@@ -603,7 +602,6 @@ public final class Request {
         params.withLocal(request.local());
         params.withHuman(request.humanReadable());
         params.withIndicesOptions(request.indicesOptions());
-        params.withFlatSettings(request.flatSettings());
         params.withIncludeDefaults(request.includeDefaults());
         return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null);
     }
@@ -613,7 +611,6 @@ public final class Request {
         parameters.withTimeout(updateSettingsRequest.timeout());
         parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout());
         parameters.withIndicesOptions(updateSettingsRequest.indicesOptions());
-        parameters.withFlatSettings(updateSettingsRequest.flatSettings());
         parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting());

         String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices();
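Taken together, the three hunks above remove the per-request `flatSettings` parameter from the request builders. A minimal sketch of the request surface that survives this change, using only the setters still visible in the hunks (the index name is hypothetical):

[source,java]
----
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;

public class UpdateSettingsExample {
    public static void main(String[] args) {
        // Build an update-settings request with the options that remain after
        // this change; flatSettings(...) is no longer part of the request.
        UpdateSettingsRequest request = new UpdateSettingsRequest("my-index"); // hypothetical index name
        request.timeout("1m");              // still wired through withTimeout above
        request.masterNodeTimeout("1m");    // still wired through withMasterTimeout above
        request.setPreserveExisting(false); // still wired through withPreserveExisting above
    }
}
----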
@@ -272,7 +272,6 @@ public class RequestTests extends ESTestCase {
         Map<String, String> expectedParams = new HashMap<>();
         setRandomIndicesOptions(getIndexRequest::indicesOptions, getIndexRequest::indicesOptions, expectedParams);
         setRandomLocal(getIndexRequest, expectedParams);
-        setRandomFlatSettings(getIndexRequest::flatSettings, expectedParams);
         setRandomHumanReadable(getIndexRequest, expectedParams);
         setRandomIncludeDefaults(getIndexRequest, expectedParams);

@@ -1292,7 +1291,6 @@ public class RequestTests extends ESTestCase {
     public void testClusterPutSettings() throws IOException {
         ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
         Map<String, String> expectedParams = new HashMap<>();
-        setRandomFlatSettings(request::flatSettings, expectedParams);
         setRandomMasterTimeout(request, expectedParams);
         setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);

@@ -1344,7 +1342,6 @@ public class RequestTests extends ESTestCase {
         String[] indices = randomBoolean() ? null : randomIndicesNames(0, 2);
         UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices);
         Map<String, String> expectedParams = new HashMap<>();
-        setRandomFlatSettings(updateSettingsRequest::flatSettings, expectedParams);
         setRandomMasterTimeout(updateSettingsRequest, expectedParams);
         setRandomTimeout(updateSettingsRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
         setRandomIndicesOptions(updateSettingsRequest::indicesOptions, updateSettingsRequest::indicesOptions, expectedParams);
@@ -1627,16 +1624,6 @@ public class RequestTests extends ESTestCase {
         }
     }

-    private static void setRandomFlatSettings(Consumer<Boolean> setter, Map<String, String> expectedParams) {
-        if (randomBoolean()) {
-            boolean flatSettings = randomBoolean();
-            setter.accept(flatSettings);
-            if (flatSettings) {
-                expectedParams.put("flat_settings", String.valueOf(flatSettings));
-            }
-        }
-    }
-
     private static void setRandomMasterTimeout(MasterNodeRequest<?> request, Map<String, String> expectedParams) {
         if (randomBoolean()) {
             String masterTimeout = randomTimeValue();
@@ -124,10 +124,6 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
         request.masterNodeTimeout("1m"); // <2>
         // end::put-settings-request-masterTimeout

-        // tag::put-settings-request-flat-settings
-        request.flatSettings(true); // <1>
-        // end::put-settings-request-flat-settings
-
         // tag::put-settings-execute
         ClusterUpdateSettingsResponse response = client.cluster().putSettings(request);
         // end::put-settings-execute
@@ -58,7 +58,6 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
 import org.elasticsearch.client.RestHighLevelClient;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
@@ -114,8 +113,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         request.local(false); // <1>
         request.humanReadable(true); // <2>
         request.includeDefaults(false); // <3>
-        request.flatSettings(false); // <4>
-        request.indicesOptions(indicesOptions); // <5>
+        request.indicesOptions(indicesOptions); // <4>
         // end::indices-exists-request-optionals

         // tag::indices-exists-response
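With the callouts renumbered above, a sketch of the complete optional-arguments flow for the exists call. This is a hedged reconstruction of the 6.x high-level client API that this test exercises, not part of the change set; the index name is hypothetical:

[source,java]
----
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.RestHighLevelClient;

public class IndicesExistsExample {
    // Mirrors the renumbered snippet above; "twitter" is a hypothetical index name.
    static boolean indexExists(RestHighLevelClient client) throws java.io.IOException {
        GetIndexRequest request = new GetIndexRequest().indices("twitter");
        request.local(false);                                        // <1>
        request.humanReadable(true);                                 // <2>
        request.includeDefaults(false);                              // <3>
        request.indicesOptions(IndicesOptions.lenientExpandOpen());  // <4> (was <5> before this change)
        return client.indices().exists(request);
    }
}
----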
@@ -1433,9 +1431,6 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         // end::put-settings-settings-source
         }

-        // tag::put-settings-request-flat-settings
-        request.flatSettings(true); // <1>
-        // end::put-settings-request-flat-settings
         // tag::put-settings-request-preserveExisting
         request.setPreserveExisting(false); // <1>
         // end::put-settings-request-preserveExisting
@@ -54,13 +54,6 @@ include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-setti
 ==== Optional Arguments
 The following arguments can optionally be provided:

-["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-flat-settings]
---------------------------------------------------
-<1> Whether the updated settings returned in the `ClusterUpdateSettings` should
-be in a flat format
-
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-timeout]
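The documentation hunks above and below drop the `flat-settings` snippet from the put-settings pages. For readers who still want flat rendering of settings in responses, a hedged sketch follows: the `flat_settings` query parameter remains available on the REST layer and can be sent through the low-level client. This workaround is an assumption of this write-up, not part of the change set:

[source,java]
----
import java.util.Collections;
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class FlatSettingsExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            // Ask for cluster settings rendered flat, e.g. "indices.recovery.max_bytes_per_sec"
            // as a single key instead of a nested JSON object.
            Response response = client.performRequest("GET", "/_cluster/settings",
                    Collections.singletonMap("flat_settings", "true"));
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
----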
@@ -23,8 +23,7 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[indices-exists-req
 <1> Whether to return local information or retrieve the state from master node
 <2> Return result in a format suitable for humans
 <3> Whether to return all default setting for each of the indices
-<4> Return settings in flat format
-<5> Controls how unavailable indices are resolved and how wildcard expressions are expanded
+<4> Controls how unavailable indices are resolved and how wildcard expressions are expanded

 [[java-rest-high-indices-sync]]
 ==== Synchronous Execution
@@ -55,13 +55,6 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-setti
 ==== Optional Arguments
 The following arguments can optionally be provided:

-["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-flat-settings]
---------------------------------------------------
-<1> Whether the updated settings returned in the `UpdateSettings` should
-be in a flat format
-
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-preserveExisting]
@@ -5,39 +5,6 @@ include::../Versions.asciidoc[]

 include::painless-getting-started.asciidoc[]

-// include::painless-examples.asciidoc[]
-
-// include::painless-design.asciidoc[]
-
 include::painless-lang-spec.asciidoc[]

-include::painless-syntax.asciidoc[]
-
 include::painless-api-reference.asciidoc[]
-
-////
-Proposed Outline (WIP)
-Getting Started with Painless
-Accessing Doc Values
-Updating Fields
-Working with Dates
-Using Regular Expressions
-Debugging Painless Scripts
-
-Example Scripts
-Using Painless in Script Fields
-Using Painless in Watches
-Using Painless in Function Score Queries
-Using Painless in Script Queries
-Using Painless When Updating Docs
-Using Painless When Reindexing
-
-How Painless Works
-Painless Architecture
-Dispatching Functions
-
-Painless Language Specification
-Painless API
-////
-
-Painless API Reference
@@ -1,17 +1,13 @@
-["appendix",id="painless-api-reference"]
-= Painless API Reference
+[[painless-api-reference]]
+== Painless API Reference

-Painless has a strict whitelist for methods and
-classes to make sure that all painless scripts are secure and fast. Most of
-these methods are exposed directly from the JRE while others are part of
-Elasticsearch or Painless itself. Below is a list of all available methods
-grouped under the classes on which you can call them. Clicking on the method
-name takes you to the documentation for the method.
-
-NOTE: Methods defined in the JRE also have a `(java 9)` link which can be used
-to see the method's documentation in Java 9 while clicking on the method's name
-goes to the Java 8 documentation. Usually these aren't different but it is
-worth going to the version that matches the version of Java you are using to
-run Elasticsearch just in case.
+Painless has a strict whitelist for methods and classes to ensure all
+painless scripts are secure. Most of these methods are exposed directly
+from the Java Runtime Environment (JRE) while others are part of
+Elasticsearch or Painless itself. Below is a list of all available
+classes grouped with their respective methods. Clicking on the method
+name takes you to the documentation for that specific method. Methods
+defined in the JRE also have a `(java 9)` link which can be used to see
+the method's documentation in Java 9.

 include::painless-api-reference/index.asciidoc[]
@@ -0,0 +1,172 @@
+[[painless-casting]]
+=== Casting
+
+Casting is the conversion of one type to another. Implicit casts are casts that
+occur automatically, such as during an assignment operation. Explicit casts are
+casts where you use the casting operator to explicitly convert one type to
+another. This is necessary during operations where the cast cannot be inferred.
+
+To cast to a new type, precede the expression by the new type enclosed in
+parentheses, for example
+`(int)x`.
+
+The following sections specify the implicit casts that can be performed and the
+explicit casts that are allowed. The only other permitted cast is casting
+a single character `String` to a `char`.
+
+*Grammar:*
+[source,ANTLR4]
+----
+cast: '(' TYPE ')' expression
+----
+
+[[numeric-casting]]
+==== Numeric Casting
+
+The following table shows the allowed implicit and explicit casts between
+numeric types. Read the table by row. To find out if you need to explicitly
+cast from type A to type B, find the row for type A and scan across to the
+column for type B.
+
+IMPORTANT: Explicit casts between numeric types can result in some data loss. A
+smaller numeric type cannot necessarily accommodate the value from a larger
+numeric type. You might also lose precision when casting from integer types
+to floating point types.
+
+|====
+|        | byte     | short    | char     | int      | long     | float    | double
+| byte   |          | implicit | implicit | implicit | implicit | implicit | implicit
+| short  | explicit |          | explicit | implicit | implicit | implicit | implicit
+| char   | explicit | explicit |          | implicit | implicit | implicit | implicit
+| int    | explicit | explicit | explicit |          | implicit | implicit | implicit
+| long   | explicit | explicit | explicit | explicit |          | implicit | implicit
+| float  | explicit | explicit | explicit | explicit | explicit |          | implicit
+| double | explicit | explicit | explicit | explicit | explicit | explicit |
+|====
+
+Example(s)
+[source,Java]
+----
+int a = 1;            // Declare int variable a and set it to the literal
+                      // value 1
+long b = a;           // Declare long variable b and set it to int variable
+                      // a with an implicit cast to convert from int to long
+short c = (short)b;   // Declare short variable c, explicitly cast b to a
+                      // short, and assign b to c
+byte d = a;           // ERROR: Casting an int to a byte requires an explicit
+                      // cast
+double e = (double)a; // Explicitly cast int variable a to a double and assign
+                      // it to the double variable e. The explicit cast is
+                      // allowed, but it is not necessary.
+----
+
+[[reference-casting]]
+==== Reference Casting
+
+A reference type can be implicitly cast to another reference type as long as
+the type being cast _from_ is a descendant of the type being cast _to_. A
+reference type can be explicitly cast _to_ if the type being cast to is a
+descendant of the type being cast _from_.
+
+*Examples:*
+[source,Java]
+----
+List x;                        // Declare List variable x
+ArrayList y = new ArrayList(); // Declare ArrayList variable y and assign it a
+                               // newly allocated ArrayList [1]
+x = y;                         // Assign Arraylist y to List x using an
+                               // implicit cast
+y = (ArrayList)x;              // Explicitly cast List x to an ArrayList and
+                               // assign it to ArrayList y
+x = (List)y;                   // Set List x to ArrayList y using an explicit
+                               // cast (the explicit cast is not necessary)
+y = x;                         // ERROR: List x cannot be implicitly cast to
+                               // an ArrayList, an explicit cast is required
+Map m = y;                     // ERROR: Cannot implicitly or explicitly cast [2]
+                               // an ArrayList to a Map, no relationship
+                               // exists between the two types.
+----
+[1] `ArrayList` is a descendant of the `List` type.
+[2] `Map` is unrelated to the `List` and `ArrayList` types.
+
+[[def-type-casting]]
+==== def Type Casting
+All primitive and reference types can always be implicitly cast to
+`def`. While it is possible to explicitly cast to `def`, it is not necessary.
+
+However, it is not always possible to implicitly cast a `def` to other
+primitive and reference types. An explicit cast is required if an explicit
+cast would normally be required between the non-def types.
+
+*Examples:*
+[source,Java]
+----
+def x;          // Declare def variable x and set it to null
+x = 3;          // Set the def variable x to the literal 3 with an implicit
+                // cast from int to def
+double a = x;   // Declare double variable a and set it to def variable x,
+                // which contains a double
+int b = x;      // ERROR: Results in a run-time error because an explicit cast is
+                // required to cast from a double to an int
+int c = (int)x; // Declare int variable c, explicitly cast def variable x to an
+                // int, and assign x to c
+----
+
+[[boxing-unboxing]]
+==== Boxing and Unboxing
+
+Boxing is where a cast is used to convert a primitive type to its corresponding
+reference type. Unboxing is the reverse, converting a reference type to the
+corresponding primitive type.
+
+There are two places Painless performs implicit boxing and unboxing:
+
+* When you call methods, Painless automatically boxes and unboxes arguments
+so you can specify either primitive types or their corresponding reference
+types.
+* When you use the `def` type, Painless automatically boxes and unboxes as
+needed when converting to and from `def`.
+
+The casting operator does not support any way to explicitly box a primitive
+type or unbox a reference type.
+
+If a primitive type needs to be converted to a reference type, the Painless
+reference type API supports methods that can do that. However, under normal
+circumstances this should not be necessary.
+
+*Examples:*
+[source,Java]
+----
+Integer x = 1;               // ERROR: not a legal implicit cast
+Integer y = (Integer)1;      // ERROR: not a legal explicit cast
+int a = new Integer(1);      // ERROR: not a legal implicit cast
+int b = (int)new Integer(1); // ERROR: not a legal explicit cast
+----
+
+[[promotion]]
+==== Promotion
+
+Promotion is where certain operations require types to be either a minimum
+numerical type or for two (or more) types to be equivalent.
+The documentation for each operation that has these requirements
+includes promotion tables that describe how this is handled.
+
+When an operation promotes a type or types, the resultant type
+of the operation is the promoted type. Types can be promoted to def
+at compile-time; however, at run-time, the resultant type will be the
+promotion of the types the `def` is representing.
+
+*Examples:*
+[source,Java]
+----
+2 + 2.0     // Add the literal int 2 and the literal double 2.0. The literal
+            // 2 is promoted to a double and the resulting value is a double.
+
+def x = 1;  // Declare def variable x and set it to the literal int 1 through
+            // an implicit cast
+x + 2.0F    // Add def variable x and the literal float 2.0.
+            // At compile-time the types are promoted to def.
+            // At run-time the types are promoted to float.
+----
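One note on the boxing discussion in the new file above: since the casting operator cannot box or unbox, the reference type API has to do it. A short sketch, hedged on the assumption that `Integer.valueOf` and `intValue`, as standard JRE methods, are on the Painless whitelist:

[source,Java]
----
Integer x = Integer.valueOf(1); // box an int through the API instead of a cast
int a = x.intValue();           // unbox through the API instead of a cast
def y = 2;                      // with def, boxing and unboxing happen implicitly
----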
@@ -0,0 +1,51 @@
+[[painless-comments]]
+=== Comments
+
+Painless supports both single-line and multi-line comments. Comments can be
+included anywhere within a script. Use the `//` token anywhere on a line to
+specify a single-line comment. All characters from the `//` token to the end
+of the line are ignored. Use an opening `/*` token and a closing `*/` token
+to specify a multi-line comment. Multi-line comments can start anywhere on a
+line, and all characters in between the `/*` token and `*/` token are ignored.
+
+*Grammar*
+[source,ANTLR4]
+----
+SINGLE_LINE_COMMENT: '//' .*? [\n\r];
+MULTI_LINE_COMMENT: '/*' .*? '*/';
+----
+
+*Examples*
+
+Single-line comments.
+
+[source,Painless]
+----
+// single-line comment
+
+int value; // single-line comment
+----
+
+Multi-line comments.
+
+[source,Painless]
+----
+/* multi-
+   line
+   comment */
+
+int value; /* multi-
+              line
+              comment */ value = 0;
+
+int value; /* multi-line
+              comment */
+
+/* multi-line
+   comment */ int value;
+
+int value; /* multi-line
+              comment */ value = 0;
+
+int value; /* multi-line comment */ value = 0;
+----
@@ -2,7 +2,7 @@ _Painless_ is a simple, secure scripting language designed specifically for use
 with Elasticsearch. It is the default scripting language for Elasticsearch and
 can safely be used for inline and stored scripts. For a detailed description of
 the Painless syntax and language features, see the
-{painless}/painless-specification.html[Painless Language Specification].
+{painless}/painless-lang-spec.html[Painless Language Specification].

 [[painless-features]]
 You can use Painless anywhere scripts can be used in Elasticsearch. Painless
@@ -1,7 +1,6 @@
-[[painless-syntax]]
-=== Painless Syntax
+[[painless-general-syntax]]
+=== General Syntax

-[float]
 [[control-flow]]
 ==== Control flow

@@ -17,7 +16,6 @@ for (item : list) {
 }
 ---------------------------------------------------------

-[float]
 [[functions]]
 ==== Functions

@@ -32,7 +30,6 @@ if (isNegative(someVar)) {
 }
 ---------------------------------------------------------

-[float]
 [[lambda-expressions]]
 ==== Lambda expressions
 Lambda expressions and method references work the same as in https://docs.oracle.com/javase/tutorial/java/javaOO/lambdaexpressions.html[Java].
@@ -49,7 +46,6 @@ list.sort(Integer::compare);
 You can make method references to functions within the script with `this`,
 for example `list.sort(this::mycompare)`.

-[float]
 [[patterns]]
 ==== Patterns

@@ -62,7 +58,6 @@ are always constants and compiled efficiently a single time.
 Pattern p = /[aeiou]/
 ---------------------------------------------------------

-[float]
 [[pattern-flags]]
 ===== Pattern flags

@@ -84,34 +79,3 @@ Pattern class] using these characters:
 |`u` | UNICODE_CASE | `'Ɛ' ==~ /ɛ/iu`
 |`x` | COMMENTS (aka extended) | `'a' ==~ /a #comment/x`
 |=======================================================================
-
-[float]
-[[painless-deref]]
-==== Dereferences
-
-Like lots of languages, Painless uses `.` to reference fields and call methods:
-
-[source,painless]
----------------------------------------------------------
-String foo = 'foo';
-TypeWithGetterOrPublicField bar = new TypeWithGetterOrPublicField()
-return foo.length() + bar.x
----------------------------------------------------------
-
-Like Groovy, Painless uses `?.` to perform null-safe references, with the
-result being `null` if the left hand side is `null`:
-
-[source,painless]
----------------------------------------------------------
-String foo = null;
-return foo?.length() // Returns null
----------------------------------------------------------
-
-Unlike Groovy, Painless doesn't support writing to `null` values with this
-operator:
-
-[source,painless]
----------------------------------------------------------
-TypeWithSetterOrPublicField foo = null;
-foo?.x = 'bar' // Compile error
---------------------------------------------------------
@@ -0,0 +1,13 @@
+[[painless-keywords]]
+=== Keywords
+
+The keywords in the table below are reserved for built-in language
+features. These keywords cannot be used as <<identifiers, identifiers>> or
+<<painless-types, types>>.
+
+[cols="^1,^1,^1,^1,^1"]
+|====
+| if | else | while | do | for
+| in | continue | break | return | new
+| try | catch | throw | this | instanceof
+|====
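As a quick illustration of the rule stated in the new keywords file above (illustrative only; this example is not part of the commit):

[source,Java]
----
int count = 4; // legal: count is an ordinary identifier
int new = 4;   // ERROR: new is a reserved keyword and cannot be used as an identifier
----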
@@ -1,73 +1,34 @@
-[[painless-specification]]
+[[painless-lang-spec]]
 == Painless Language Specification

-Painless uses a Java-style syntax that is similar to Groovy. In fact, most
-Painless scripts are also valid Groovy, and simple Groovy scripts are typically
-valid Painless. This specification assumes you have at least a passing
-familiarity with Java and related languages.
-
-Painless is essentially a subset of Java with some additional scripting
-language features that make scripts easier to write. However, there are some
-important differences, particularly with the casting model. For more detailed
+Painless is a scripting language designed for security and performance.
+Painless syntax is similar to Java syntax along with some additional
+features such as dynamic typing, Map and List accessor shortcuts, and array
+initializers. As a direct comparison to Java, there are some important
+differences, especially related to the casting model. For more detailed
 conceptual information about the basic constructs that Java and Painless share,
 refer to the corresponding topics in the
 https://docs.oracle.com/javase/specs/jls/se8/html/index.html[Java Language
 Specification].

 Painless scripts are parsed and compiled using the http://www.antlr.org/[ANTLR4]
-and http://asm.ow2.org/[ASM] libraries. Painless scripts are compiled directly
-into Java byte code and executed against a standard Java Virtual Machine. This
-specification uses ANTLR4 grammar notation to describe the allowed syntax.
+and http://asm.ow2.org/[ASM] libraries. Scripts are compiled directly
+into Java Virtual Machine (JVM) byte code and executed against a standard JVM.
+This specification uses ANTLR4 grammar notation to describe the allowed syntax.
 However, the actual Painless grammar is more compact than what is shown here.

-[float]
-[[comments]]
-==== Comments
-
-Painless supports both single-line and multi-line comments. You can include
-comments anywhere within a script.
-
-Single-line comments are preceded by two slashes: `// comment`. They can be
-placed anywhere on a line. All characters from the two slashes to the end of
-the line are ignored.
-
-Multi-line comments are preceded by a slash-star `/*` and closed by
-star-slash `*/`. Multi-line comments can start anywhere on a line. All
-characters from the opening `/*` to the closing `*/` are ignored.
-
-*Examples:*
-
-[source,Java]
-----
-// single-line comment
-
-<code> // single-line comment
-
-/* multi-
-   line
-   comment */
-
-<code> /* multi-line
-          comment */ <code>
-
-<code> /* multi-line comment */ <code>
-----
-
-[float]
-[[keywords]]
-==== Keywords
-
-Painless reserves the following keywords for built-in language features.
-These keywords cannot be used in other contexts, such as identifiers.
-
-[cols="^1,^1,^1,^1,^1"]
-|====
-| if | else | while | do | for
-| in | continue | break | return | new
-| try | catch | throw | this | instanceof
-|====
+include::painless-comments.asciidoc[]
+
+include::painless-keywords.asciidoc[]

 include::painless-literals.asciidoc[]

 include::painless-variables.asciidoc[]

 include::painless-types.asciidoc[]

+include::painless-casting.asciidoc[]
+
 include::painless-operators.asciidoc[]
+
+include::painless-general-syntax.asciidoc[]
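The rewritten introduction above names dynamic typing, Map and List accessor shortcuts, and array initializers as the main additions over Java. A compact sketch of all three, drawn from standard Painless features rather than from this commit:

[source,Java]
----
def m = ['a': 1];              // map initializer with dynamic typing
m['b'] = 2;                    // map accessor shortcut for m.put('b', 2)
def l = [1, 2, 3];             // list initializer
int first = l[0];              // list accessor shortcut for l.get(0)
int[] a = new int[] {1, 2, 3}; // array initializer
----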
@@ -1,94 +1,143 @@
-[[literals]]
+[[painless-literals]]
 === Literals

-Literals are values that you can specify directly in Painless scripts.
+Use literals to specify different types of values directly in a script.

 [[integers]]
 ==== Integers

-Specify integer literals in decimal, octal, or hex notation. Use the following
-single letter designations to specify the primitive type: `l` for `long`, `f`
-for `float`, and `d` for `double`. If not specified, the type defaults to
-`int` (with the exception of certain assignments described later).
+Use integer literals to specify an integer value in decimal, octal, or hex
+notation of the <<primitive-types, primitive types>> `int`, `long`, `float`,
+or `double`. Use the following single letter designations to specify the
+<<primitive-types, primitive type>>: `l` or `L` for `long`, `f` or `F` for
+`float`, and `d` or `D` for `double`. If not specified, the type defaults to
+`int`. Use `0` as a prefix to specify an integer literal as octal, and use
+`0x` or `0X` as a prefix to specify an integer literal as hex.

-*Grammar:*
+*Grammar*
 [source,ANTLR4]
 ----
 INTEGER: '-'? ( '0' | [1-9] [0-9]* ) [lLfFdD]?;
 OCTAL: '-'? '0' [0-7]+ [lL]?;
 HEX: '-'? '0' [xX] [0-9a-fA-F]+ [lL]?;
 ----

-*Examples:*
-[source,Java]
+*Examples*
+
+Integer literals.
+
+[source,Painless]
 ----
-0     // integer literal of 0
-0D    // double literal of 0.0
-1234L // long literal of 1234
--90F  // float literal of -90.0
--022  // integer literal of -18 specified in octal
-0xF2A // integer literal of 3882
+0     <1>
+0D    <2>
+1234L <3>
+-90f  <4>
+-022  <5>
+0xF2A <6>
 ----

-[[floating-point-values]]
-==== Floating Point Values
+<1> `int 0`
+<2> `double 0.0`
+<3> `long 1234`
+<4> `float -90.0`
+<5> `int -18` in octal
+<6> `int 3882` in hex

-Specify floating point literals using the following single letter designations
-for the primitive type: `f` for `float` and `d` for `double`.
-If not specified, the type defaults to `double`.
+[[floats]]
+==== Floats

-*Grammar:*
+Use floating point literals to specify a floating point value of the
+<<primitive-types, primitive types>> `float` or `double`. Use the following
+single letter designations to specify the <<primitive-types, primitive type>>:
+`f` or `F` for `float` and `d` or `D` for `double`. If not specified, the type defaults
+to `double`.
+
+*Grammar*
 [source,ANTLR4]
 ----
-DECIMAL: '-'? ( '0' | [1-9] [0-9]* ) (DOT [0-9]+)? ( [eE] [+\-]? [0-9]+ )? [fFdD]?;
+DECIMAL: '-'? ( '0' | [1-9] [0-9]* ) (DOT [0-9]+)? EXPONENT? [fFdD]?;
+EXPONENT: ( [eE] [+\-]? [0-9]+ );
 ----

-*Examples:*
-[source,Java]
+*Examples*
+
+Floating point literals.
+
+[source,Painless]
 ----
-0.0      // double value of 0.0
-1E6      // double value of 1000000
-0.977777 // double value of 0.97777
--126.34  // double value of -126.34
-89.9F    // float value of 89.9
+0.0      <1>
+1E6      <2>
+0.977777 <3>
+-126.34  <4>
+89.9F    <5>
 ----

+<1> `double 0.0`
+<2> `double 1000000.0` in exponent notation
+<3> `double 0.977777`
+<4> `double -126.34`
+<5> `float 89.9`
+
 [[strings]]
 ==== Strings

-Specify literal string with either single or double quotes. In double-quoted
-literal strings, you can escape double-quotes with a backslash to include them
-in the string. Similarly, you escape single quotes with a backslash in
-single-quoted literal strings. Backslashes themselves also need to be
-escaped with a backslash.
+Use string literals to specify string values of the
+<<string-type, String type>> with either single-quotes or double-quotes.
+Use a `\"` token to include a double-quote as part of a double-quoted string
+literal. Use a `\'` token to include a single-quote as part of a single-quoted
+string literal. Use a `\\` token to include a backslash as part of any string
+literal.

-*Grammar:*
+*Grammar*
 [source,ANTLR4]
 ----
-STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' ) | ( '\'' ( '\\\'' | '\\\\' | ~[\\'] )*? '\'' );
+STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' )
+      | ( '\'' ( '\\\'' | '\\\\' | ~[\\'] )*? '\'' );
 ----

-*Examples:*
-[source,Java]
+*Examples*
+
+String literals using single-quotes.
+
+[source,Painless]
 ----
-"double-quoted String literal"
-'single-quoted String literal'
-"\"double-quoted String with escaped double-quotes\" and backslash: \\"
-'\'single-quoted String with escaped single-quotes\' and backslash \\'
-"double-quoted String with non-escaped 'single-quotes'"
-'single-quoted String with non-escaped "double-quotes"'
+'single-quoted string literal'
+'\'single-quoted string with escaped single-quotes\' and backslash \\'
+'single-quoted string with non-escaped "double-quotes"'
 ----

-[[char]]
-===== Char
+String literals using double-quotes.

-You cannot directly specify character literals in Painless. However, you can
-cast single-character strings to char. Attempting to cast a multi-character
-string to a char throws an error.
+[source,Painless]
+----
+"double-quoted string literal"
+"\"double-quoted string with escaped double-quotes\" and backslash: \\"
+"double-quoted string with non-escaped 'single-quotes'"
+----

-*Examples:*
-[source,Java]
+[[characters]]
+==== Characters
+
+Use the <<painless-casting, casting operator>> to convert string literals or
+<<string-type, String>> values into <<primitive-types, char>> values.
+<<string-type, String>> values converted into
+<<primitive-types, char>> values must be exactly one character in length
+or an error will occur.
+
+*Examples*
+
+Casting string literals into <<primitive-types, char>> values.
+
+[source,Painless]
 ----
 (char)"C"
 (char)'c'
 ----
+
+Casting a <<string-type, String>> value into a <<primitive-types, char>> value.
+
+[source,Painless]
+----
+String s = "s";
+char c = (char)s;
+----
@@ -1,3 +1,4 @@
+[[painless-operators]]
 === Operators

 The following is a table of the available operators in Painless. Each operator will have further information and examples outside of the table. Many operators will have a promotion table as described by the documentation on promotion [MARK].
@@ -1,5 +1,5 @@
-[[types]]
-=== Data Types
+[[painless-types]]
+=== Types

 Painless supports both dynamic and static types. Static types are split into
 _primitive types_ and _reference types_.
|
@ -267,176 +267,3 @@ def[] da = new def[] {i, l, f*d, s}; // Declare def array da and set it to
|
||||||
// a def array with a size of 4 and the
|
// a def array with a size of 4 and the
|
||||||
// values i, l, f*d, and s
|
// values i, l, f*d, and s
|
||||||
----
|
----
|
||||||
|
|
||||||
[[casting]]
|
|
||||||
=== Casting
|
|
||||||
|
|
||||||
Casting is the conversion of one type to another. Implicit casts are casts that
|
|
||||||
occur automatically, such as during an assignment operation. Explicit casts are
|
|
||||||
casts where you use the casting operator to explicitly convert one type to
|
|
||||||
another. This is necessary during operations where the cast cannot be inferred.
|
|
||||||
|
|
||||||
To cast to a new type, precede the expression by the new type enclosed in
|
|
||||||
parentheses, for example
|
|
||||||
`(int)x`.
|
|
||||||
|
|
||||||
The following sections specify the implicit casts that can be performed and the
|
|
||||||
explicit casts that are allowed. The only other permitted cast is casting
|
|
||||||
a single character `String` to a `char`.
|
|
||||||
|
|
||||||
*Grammar:*
|
|
||||||
[source,ANTLR4]
|
|
||||||
----
|
|
||||||
cast: '(' TYPE ')' expression
|
|
||||||
----
|
|
||||||
|
|
||||||
[[numeric-casting]]
|
|
||||||
==== Numeric Casting
|
|
||||||
|
|
||||||
The following table shows the allowed implicit and explicit casts between
|
|
||||||
numeric types. Read the table by row. To find out if you need to explicitly
|
|
||||||
cast from type A to type B, find the row for type A and scan across to the
|
|
||||||
column for type B.
|
|
||||||
|
|
||||||
IMPORTANT: Explicit casts between numeric types can result in some data loss. A
|
|
||||||
smaller numeric type cannot necessarily accommodate the value from a larger
|
|
||||||
numeric type. You might also lose precision when casting from integer types
|
|
||||||
to floating point types.
|
|
||||||
|
|
||||||
|====
|
|
||||||
| | byte | short | char | int | long | float | double
|
|
||||||
| byte | | implicit | implicit | implicit | implicit | implicit | implicit
|
|
||||||
| short | explicit | | explicit | implicit | implicit | implicit | implicit
|
|
||||||
| char | explicit | explicit | | implicit | implicit | implicit | implicit
|
|
||||||
| int | explicit | explicit | explicit | | implicit | implicit | implicit
|
|
||||||
| long | explicit | explicit | explicit | explicit | | implicit | implicit
|
|
||||||
| float | explicit | explicit | explicit | explicit | explicit | | implicit
|
|
||||||
| double | explicit | explicit | explicit | explicit | explicit | explicit |
|
|
||||||
|====
|
|
||||||
|
|
||||||
|
|
||||||
Example(s)
|
|
||||||
[source,Java]
|
|
||||||
----
|
|
||||||
int a = 1; // Declare int variable a and set it to the literal
|
|
||||||
// value 1
|
|
||||||
long b = a; // Declare long variable b and set it to int variable
|
|
||||||
// a with an implicit cast to convert from int to long
|
|
||||||
short c = (short)b; // Declare short variable c, explicitly cast b to a
|
|
||||||
// short, and assign b to c
|
|
||||||
byte d = a; // ERROR: Casting an int to a byte requires an explicit
|
|
||||||
// cast
|
|
||||||
double e = (double)a; // Explicitly cast int variable a to a double and assign
|
|
||||||
// it to the double variable e. The explicit cast is
|
|
||||||
// allowed, but it is not necessary.
|
|
||||||
----
|
|
||||||
|
|
||||||
[[reference-casting]]
|
|
||||||
==== Reference Casting
|
|
||||||
|
|
||||||
A reference type can be implicitly cast to another reference type as long as
|
|
||||||
the type being cast _from_ is a descendant of the type being cast _to_. A
|
|
||||||
reference type can be explicitly cast _to_ if the type being cast to is a
|
|
||||||
descendant of the type being cast _from_.
|
|
||||||
|
|
||||||
*Examples:*
|
|
||||||
[source,Java]
|
|
||||||
----
|
|
||||||
List x; // Declare List variable x
|
|
||||||
ArrayList y = new ArrayList(); // Declare ArrayList variable y and assign it a
|
|
||||||
// newly allocated ArrayList [1]
|
|
||||||
x = y; // Assign Arraylist y to List x using an
|
|
||||||
// implicit cast
|
|
||||||
y = (ArrayList)x; // Explicitly cast List x to an ArrayList and
|
|
||||||
// assign it to ArrayList y
|
|
||||||
x = (List)y; // Set List x to ArrayList y using an explicit
|
|
||||||
// cast (the explicit cast is not necessary)
|
|
||||||
y = x; // ERROR: List x cannot be implicitly cast to
|
|
||||||
// an ArrayList, an explicit cast is required
|
|
||||||
Map m = y; // ERROR: Cannot implicitly or explicitly cast [2]
|
|
||||||
// an ArrayList to a Map, no relationship
|
|
||||||
// exists between the two types.
|
|
||||||
----
|
|
||||||
[1] `ArrayList` is a descendant of the `List` type.
|
|
||||||
[2] `Map` is unrelated to the `List` and `ArrayList` types.
|
|
||||||
|
|
||||||
[[def-type-casting]]
|
|
||||||
==== def Type Casting
|
|
||||||
All primitive and reference types can always be implicitly cast to
|
|
||||||
`def`. While it is possible to explicitly cast to `def`, it is not necessary.
|
|
||||||
|
|
||||||
However, it is not always possible to implicitly cast a `def` to other
|
|
||||||
primitive and reference types. An explicit cast is required if an explicit
|
|
||||||
cast would normally be required between the non-def types.
|
|
||||||
|
|
||||||
|
|
||||||
*Examples:*
|
|
||||||
[source,Java]
|
|
||||||
----
|
|
||||||
def x; // Declare def variable x and set it to null
|
|
||||||
x = 3; // Set the def variable x to the literal 3 with an implicit
|
|
||||||
// cast from int to def
|
|
||||||
double a = x; // Declare double variable a and set it to def variable x,
|
|
||||||
// which contains a double
|
|
||||||
int b = x; // ERROR: Results in a run-time error because an explicit cast is
|
|
||||||
// required to cast from a double to an int
|
|
||||||
int c = (int)x; // Declare int variable c, explicitly cast def variable x to an
|
|
||||||
// int, and assign x to c
|
|
||||||
----
|
|
||||||
|
|
||||||
[[boxing-unboxing]]
|
|
||||||
==== Boxing and Unboxing
|
|
||||||
|
|
||||||
Boxing is where a cast is used to convert a primitive type to its corresponding
|
|
||||||
reference type. Unboxing is the reverse, converting a reference type to the
|
|
||||||
corresponding primitive type.
|
|
||||||
|
|
||||||
There are two places Painless performs implicit boxing and unboxing:
|
|
||||||
|
|
||||||
* When you call methods, Painless automatically boxes and unboxes arguments
|
|
||||||
so you can specify either primitive types or their corresponding reference
|
|
||||||
types.
|
|
||||||
* When you use the `def` type, Painless automatically boxes and unboxes as
|
|
||||||
needed when converting to and from `def`.
|
|
||||||
|
|
||||||
The casting operator does not support any way to explicitly box a primitive
|
|
||||||
type or unbox a reference type.
|
|
||||||
|
|
||||||
If a primitive type needs to be converted to a reference type, the Painless
|
|
||||||
reference type API supports methods that can do that. However, under normal
|
|
||||||
circumstances this should not be necessary.
|
|
||||||
|
|
||||||
*Examples:*
|
|
||||||
[source,Java]
|
|
||||||
----
|
|
||||||
Integer x = 1; // ERROR: not a legal implicit cast
|
|
||||||
Integer y = (Integer)1; // ERROR: not a legal explicit cast
|
|
||||||
int a = new Integer(1); // ERROR: not a legal implicit cast
|
|
||||||
int b = (int)new Integer(1); // ERROR: not a legal explicit cast
|
|
||||||
----
|
|
||||||
|
|
||||||
[[promotion]]
|
|
||||||
==== Promotion
|
|
||||||
|
|
||||||
Promotion is where certain operations require types to be either a minimum
|
|
||||||
numerical type or for two (or more) types to be equivalent.
|
|
||||||
The documentation for each operation that has these requirements
|
|
||||||
includes promotion tables that describe how this is handled.
|
|
||||||
|
|
||||||
When an operation promotes a type or types, the resultant type
|
|
||||||
of the operation is the promoted type. Types can be promoted to def
|
|
||||||
at compile-time; however, at run-time, the resultant type will be the
|
|
||||||
promotion of the types the `def` is representing.
|
|
||||||
|
|
||||||
*Examples:*
|
|
||||||
[source,Java]
|
|
||||||
----
|
|
||||||
2 + 2.0 // Add the literal int 2 and the literal double 2.0. The literal
|
|
||||||
// 2 is promoted to a double and the resulting value is a double.
|
|
||||||
|
|
||||||
def x = 1; // Declare def variable x and set it to the literal int 1 through
|
|
||||||
// an implicit cast
|
|
||||||
x + 2.0F // Add def variable x and the literal float 2.0.
|
|
||||||
// At compile-time the types are promoted to def.
|
|
||||||
// At run-time the types are promoted to float.
|
|
||||||
----
|
|
||||||
|
|
|
@@ -1,15 +1,15 @@
-[[variables]]
+[[painless-variables]]
 === Variables

-Variables in Painless must be declared and can be statically or <<dynamic-types,
-dynamically typed>>.
+Variables in Painless must be declared and can be
+statically or <<dynamic-types, dynamically typed>>.

-[[variable-identifiers]]
-==== Variable Identifiers
+[[identifiers]]
+==== Identifiers

 Specify variable identifiers using the following grammar. Variable identifiers
-must start with a letter or underscore. You cannot use <<keywords, keywords>> or
-<<types, types>> as identifiers.
+must start with a letter or underscore. You cannot use
+<<painless-keywords, keywords>> or <<painless-types, types>> as identifiers.

 *Grammar:*
 [source,ANTLR4]
@@ -20,7 +20,6 @@ ID: [_a-zA-Z] [_a-zA-Z-0-9]*;
 *Examples:*
 [source,Java]
 ----
-_
 a
 Z
 id
@@ -30,8 +29,8 @@ MAP25
 _map25
 ----

-[[variable-declaration]]
-==== Variable Declaration
+[[declaration]]
+==== Declaration

 Variables must be declared before you use them. The format is `type-name
 identifier-name`. To declare multiple variables of the same type, specify a
@@ -56,7 +55,7 @@ int i = 10; // Declare the int variable i and set it to the int literal 10
 ----

 [[variable-assignment]]
-==== Variable Assignment
+==== Assignment

 Use the equals operator (`=`) to assign a value to a variable. The format is
 `identifier-name = value`. Any value expression can be assigned to any variable
@@ -22,7 +22,6 @@ node-0 flush 0 0 0
 node-0 force_merge 0 0 0
 node-0 generic 0 0 0
 node-0 get 0 0 0
-node-0 index 0 0 0
 node-0 listener 0 0 0
 node-0 management 1 0 0
 node-0 refresh 0 0 0
@@ -52,7 +51,6 @@ flush
 force_merge
 generic
 get
-index
 listener
 management
 refresh
@@ -421,3 +421,108 @@ POST _reindex
 ----
 // NOTCONSOLE
 
+[float]
+=== Use `include_type_name=false` to prepare for upgrade to 8.0
+
+Index creation, mappings and document APIs support the `include_type_name`
+option. When set to `false`, this option enables the behavior that will become
+default in 8.0 when types are removed. See some examples of interactions with
+Elasticsearch with this option turned off:
+
+[float]
+==== Index creation
+
+[source,js]
+--------------------------------------------------
+PUT index?include_type_name=false
+{
+    "mappings": {
+        "properties": { <1>
+            "foo": {
+                "type": "keyword"
+            }
+        }
+    }
+}
+--------------------------------------------------
+// CONSOLE
+<1> Mappings are included directly under the `mappings` key, without a type name.
+
+[float]
+==== PUT and GET mappings
+
+[source,js]
+--------------------------------------------------
+PUT index
+
+PUT index/_mappings?include_type_name=false
+{
+    "properties": { <1>
+        "foo": {
+            "type": "keyword"
+        }
+    }
+}
+
+GET index/_mappings?include_type_name=false
+--------------------------------------------------
+// CONSOLE
+<1> Mappings are included directly under the `mappings` key, without a type name.
+
+The above call returns
+
+[source,js]
+--------------------------------------------------
+{
+    "index": {
+        "mappings": {
+            "properties": { <1>
+                "foo": {
+                    "type": "keyword"
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+// TESTRESPONSE
+<1> Mappings are included directly under the `mappings` key, without a type name.
+
+[float]
+==== Document APIs
+
+Index APIs must be called with the `{index}/_doc` path for automatic generation of
+the `_id` and `{index}/_doc/{id}` with explicit ids.
+
+[source,js]
+--------------------------------------------------
+PUT index/_doc/1?include_type_name=false
+{
+    "foo": "bar"
+}
+--------------------------------------------------
+// CONSOLE
+
+[source,js]
+--------------------------------------------------
+{
+    "_index": "index", <1>
+    "_id": "1",
+    "_version": 1,
+    "result": "created",
+    "_shards": {
+        "total": 2,
+        "successful": 1,
+        "failed": 0
+    },
+    "_seq_no": 0,
+    "_primary_term": 1
+}
+--------------------------------------------------
+// TESTRESPONSE
+<1> The response does not include a `_type`.
+
+Likewise the <<docs-index_,GET>>, <<docs-delete,`DELETE`>>,
+<<docs-update,`_update`>> and <<search,`_search`>> APIs do not return a `_type`
+key in the response when `include_type_name` is set to `false`.
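From client code the same option travels as an ordinary query parameter. A minimal sketch using the low-level REST client, assuming a `restClient` instance and an illustrative index and body (this snippet is not part of the commit):

    import org.apache.http.HttpEntity;
    import org.apache.http.entity.ContentType;
    import org.apache.http.nio.entity.NStringEntity;
    import org.elasticsearch.client.Response;
    import java.util.Collections;
    import java.util.Map;

    // send include_type_name=false exactly as the console examples above do
    Map<String, String> params = Collections.singletonMap("include_type_name", "false");
    HttpEntity body = new NStringEntity(
        "{\"mappings\":{\"properties\":{\"foo\":{\"type\":\"keyword\"}}}}",
        ContentType.APPLICATION_JSON);
    Response response = restClient.performRequest("PUT", "/index", params, body);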
@@ -122,6 +122,11 @@ The following parameters are accepted by `geo_point` fields:
 ignored. If `false`, geo-points containing any more than latitude and longitude
 (two dimensions) values throw an exception and reject the whole document.
 
+<<null-value,`null_value`>>::
+
+    Accepts a geopoint value which is substituted for any explicit `null` values.
+    Defaults to `null`, which means the field is treated as missing.
+
 ==== Using geo-points in scripts
 
 When accessing the value of a geo-point in a script, the value is returned as
@@ -6,3 +6,11 @@
 
 * The deprecated `index.percolator.map_unmapped_fields_as_string` setting has been removed in favour of
   the `index.percolator.map_unmapped_fields_as_text` setting.
+
+==== Index thread pool
+
+* Internally, single-document index/delete/update requests are executed as bulk
+  requests with a single-document payload. This means that these requests are
+  executed on the bulk thread pool. As such, the indexing thread pool is no
+  longer needed and has been removed; consequently, the settings
+  `thread_pool.index.size` and `thread_pool.index.queue_size` have been removed.
@@ -13,12 +13,6 @@ There are several thread pools, but the important ones include:
     For generic operations (e.g., background node discovery).
     Thread pool type is `scaling`.
 
-`index`::
-    For index/delete operations. Thread pool type is `fixed`
-    with a size of `# of available processors`,
-    queue_size of `200`. The maximum size for this pool
-    is `1 + # of available processors`.
-
 `search`::
     For count/search/suggest operations. Thread pool type is
     `fixed_auto_queue_size` with a size of
@@ -55,13 +49,13 @@ There are several thread pools, but the important ones include:
     Mainly for java client executing of action when listener threaded is set to true.
     Thread pool type is `scaling` with a default max of `min(10, (# of available processors)/2)`.
 
-Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `index`
+Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `bulk`
 thread pool to have more threads:
 
 [source,yaml]
 --------------------------------------------------
 thread_pool:
-    index:
+    bulk:
         size: 30
 --------------------------------------------------
 
@@ -89,7 +83,7 @@ full, it will abort the request.
 [source,yaml]
 --------------------------------------------------
 thread_pool:
-    index:
+    bulk:
         size: 30
         queue_size: 1000
 --------------------------------------------------
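For completeness, the YAML above maps one-to-one onto the `Settings` API when a pool is configured programmatically, for example in tests. A sketch using only the documented keys (not part of the commit):

    import org.elasticsearch.common.settings.Settings;

    // same effect as the thread_pool.bulk YAML block above
    Settings settings = Settings.builder()
            .put("thread_pool.bulk.size", 30)
            .put("thread_pool.bulk.queue_size", 1000)
            .build();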
@@ -489,7 +489,7 @@ Using `_index` in scripts has been replaced with writing `ScriptEngine` backends
 === Painless Syntax
 
 See the
-{painless}/painless-specification.html[Painless Language Specification]
+{painless}/painless-lang-spec.html[Painless Language Specification]
 in the guide to the {painless}/index.html[Painless Scripting Language].
 
 [role="exclude",id="modules-scripting-painless-debugging"]
@@ -34,6 +34,7 @@ subprojects {
       Project depProject = dependencyToProject(dep)
       if (depProject != null
           && false == depProject.path.equals(':libs:elasticsearch-core')
+          && false == isEclipse
           && depProject.path.startsWith(':libs')) {
         throw new InvalidUserDataException("projects in :libs "
             + "may not depend on other projects libs except "
@@ -687,8 +687,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
      * Tests recovery of an index with or without a translog and the
      * statistics we gather about that.
      */
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29544")
-    public void testRecovery() throws IOException {
+    public void testRecovery() throws Exception {
         int count;
         boolean shouldHaveTranslog;
         if (runningAgainstOldCluster) {
@@ -701,7 +700,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
             indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject());
 
             // make sure all recoveries are done
-            ensureNoInitializingShards();
+            ensureGreen(index);
             // Explicitly flush so we're sure to have a bunch of documents in the Lucene index
             client().performRequest("POST", "/_flush");
             if (shouldHaveTranslog) {
@@ -33,7 +33,7 @@
 
   - do:
       cat.thread_pool:
-        thread_pool_patterns: bulk,management,flush,index,generic,force_merge
+        thread_pool_patterns: bulk,management,flush,generic,force_merge
         h: id,name,active
         v: true
 
@@ -44,7 +44,6 @@
                 \S+\s+ flush \s+ \d+ \n
                 \S+\s+ force_merge \s+ \d+ \n
                 \S+\s+ generic \s+ \d+ \n
-                \S+\s+ index \s+ \d+ \n
                 \S+\s+ management \s+ \d+ \n)+ $/
 
   - do:
@@ -72,12 +71,11 @@
 
   - do:
      cat.thread_pool:
-        thread_pool_patterns: bulk,index,search
+        thread_pool_patterns: bulk,search
        size: ""
 
   - match:
      $body: |
              / #node_name name active queue rejected
                ^ (\S+ \s+ bulk \s+ \d+ \s+ \d+ \s+ \d+ \n
-                  \S+ \s+ index \s+ \d+ \s+ \d+ \s+ \d+ \n
                   \S+ \s+ search \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/
@@ -117,6 +117,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_5_6_8 = new Version(V_5_6_8_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
     public static final int V_5_6_9_ID = 5060999;
     public static final Version V_5_6_9 = new Version(V_5_6_9_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
+    public static final int V_5_6_10_ID = 5061099;
+    public static final Version V_5_6_10 = new Version(V_5_6_10_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
     public static final int V_6_0_0_alpha1_ID = 6000001;
     public static final Version V_6_0_0_alpha1 =
         new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
@@ -161,6 +163,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_6_2_3 = new Version(V_6_2_3_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);
     public static final int V_6_2_4_ID = 6020499;
     public static final Version V_6_2_4 = new Version(V_6_2_4_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);
+    public static final int V_6_2_5_ID = 6020599;
+    public static final Version V_6_2_5 = new Version(V_6_2_5_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);
     public static final int V_6_3_0_ID = 6030099;
     public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_0);
     public static final int V_7_0_0_alpha1_ID = 7000001;
@@ -183,6 +187,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
                 return V_7_0_0_alpha1;
             case V_6_3_0_ID:
                 return V_6_3_0;
+            case V_6_2_5_ID:
+                return V_6_2_5;
             case V_6_2_4_ID:
                 return V_6_2_4;
             case V_6_2_3_ID:
@@ -219,6 +225,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
                 return V_6_0_0_alpha2;
             case V_6_0_0_alpha1_ID:
                 return V_6_0_0_alpha1;
+            case V_5_6_10_ID:
+                return V_5_6_10;
             case V_5_6_9_ID:
                 return V_5_6_9;
             case V_5_6_8_ID:
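The new constants follow the existing ID scheme visible above: the integer packs major/minor/revision digits plus a `99` release suffix. A sketch of the encoding inferred from the constants themselves:

    // versionId(5, 6, 10) == 5061099 (V_5_6_10_ID)
    // versionId(6, 2, 5)  == 6020599 (V_6_2_5_ID)
    static int versionId(int major, int minor, int revision) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + 99;
    }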
@@ -58,7 +58,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
         PARSER.declareObject((r, t) -> r.transientSettings = t, (p, c) -> Settings.fromXContent(p), TRANSIENT);
     }
 
-    private boolean flatSettings = false;
     private Settings transientSettings = EMPTY_SETTINGS;
     private Settings persistentSettings = EMPTY_SETTINGS;
 
@@ -74,29 +73,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
         return validationException;
     }
 
-    /**
-     * Sets the value of "flat_settings".
-     * Used only by the high-level REST client.
-     *
-     * @param flatSettings
-     *            value of "flat_settings" flag to be set
-     * @return this request
-     */
-    public ClusterUpdateSettingsRequest flatSettings(boolean flatSettings) {
-        this.flatSettings = flatSettings;
-        return this;
-    }
-
-    /**
-     * Return settings in flat format.
-     * Used only by the high-level REST client.
-     *
-     * @return <code>true</code> if settings need to be returned in flat format; <code>false</code> otherwise.
-     */
-    public boolean flatSettings() {
-        return flatSettings;
-    }
-
     public Settings transientSettings() {
         return transientSettings;
     }
@@ -66,7 +66,6 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
     private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS };
     private Feature[] features = DEFAULT_FEATURES;
     private boolean humanReadable = false;
-    private transient boolean flatSettings = false;
     private transient boolean includeDefaults = false;
 
     public GetIndexRequest() {
@@ -118,28 +117,6 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
         return humanReadable;
     }
 
-    /**
-     * Sets the value of "flat_settings".
-     * Used only by the high-level REST client.
-     *
-     * @param flatSettings value of "flat_settings" flag to be set
-     * @return this request
-     */
-    public GetIndexRequest flatSettings(boolean flatSettings) {
-        this.flatSettings = flatSettings;
-        return this;
-    }
-
-    /**
-     * Return settings in flat format.
-     * Used only by the high-level REST client.
-     *
-     * @return <code>true</code> if settings need to be returned in flat format; <code>false</code> otherwise.
-     */
-    public boolean flatSettings() {
-        return flatSettings;
-    }
-
     /**
      * Sets the value of "include_defaults".
      * Used only by the high-level REST client.
@@ -55,7 +55,6 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
     private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
     private Settings settings = EMPTY_SETTINGS;
     private boolean preserveExisting = false;
-    private boolean flatSettings = false;
 
     public UpdateSettingsRequest() {
     }
@@ -75,29 +74,6 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
         this.settings = settings;
     }
 
-    /**
-     * Sets the value of "flat_settings".
-     * Used only by the high-level REST client.
-     *
-     * @param flatSettings
-     *            value of "flat_settings" flag to be set
-     * @return this request
-     */
-    public UpdateSettingsRequest flatSettings(boolean flatSettings) {
-        this.flatSettings = flatSettings;
-        return this;
-    }
-
-    /**
-     * Return settings in flat format.
-     * Used only by the high-level REST client.
-     *
-     * @return <code>true</code> if settings need to be returned in flat format; <code>false</code> otherwise.
-     */
-    public boolean flatSettings() {
-        return flatSettings;
-    }
-
     @Override
     public ActionRequestValidationException validate() {
         ActionRequestValidationException validationException = null;
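With `flat_settings` dropped from all three request classes (#29560), the flag is no longer request-object state. Callers that still want flat keys can pass it as a plain query parameter through the low-level client; a sketch, assuming a `restClient` instance (not part of the commit):

    import org.elasticsearch.client.Response;
    import java.util.Collections;
    import java.util.Map;

    // flat_settings stays a transport-level query parameter rather than request state
    Map<String, String> params = Collections.singletonMap("flat_settings", "true");
    Response response = restClient.performRequest("GET", "/_cluster/settings", params);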
@@ -46,7 +46,7 @@ public class TransportDeleteAction extends TransportSingleItemBulkWriteAction<De
                                  ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                  TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) {
         super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
-            actionFilters, indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX,
+            actionFilters, indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.BULK,
             bulkAction, shardBulkAction);
     }
 
@@ -54,7 +54,7 @@ public class TransportIndexAction extends TransportSingleItemBulkWriteAction<Ind
                                 ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                 TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) {
         super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
-            actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX,
+            actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.BULK,
             bulkAction, shardBulkAction);
     }
 
@@ -86,7 +86,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
 
     @Override
     protected String executor() {
-        return ThreadPool.Names.INDEX;
+        return ThreadPool.Names.BULK;
     }
 
     @Override
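All three single-document transport actions now name the same executor. The constant is just a key into the node's thread pools; a sketch of how work is dispatched onto that pool (illustrative, assuming the node-level `threadPool`):

    // single-document writes now run as one-item bulks on the bulk pool
    threadPool.executor(ThreadPool.Names.BULK).execute(() -> {
        // execute the single-document write
    });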
@@ -19,11 +19,9 @@
 
 package org.elasticsearch.cluster.metadata;
 
-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ResourceAlreadyExistsException;
 import org.elasticsearch.Version;
@@ -57,7 +55,6 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@@ -78,12 +75,11 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 
-import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.nio.file.Path;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
@@ -475,9 +471,11 @@ public class MetaDataCreateIndexService extends AbstractComponent {
 
                     // now, update the mappings with the actual source
                     Map<String, MappingMetaData> mappingsMetaData = new HashMap<>();
-                    for (DocumentMapper mapper : mapperService.docMappers(true)) {
-                        MappingMetaData mappingMd = new MappingMetaData(mapper);
-                        mappingsMetaData.put(mapper.type(), mappingMd);
+                    for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) {
+                        if (mapper != null) {
+                            MappingMetaData mappingMd = new MappingMetaData(mapper);
+                            mappingsMetaData.put(mapper.type(), mappingMd);
+                        }
                    }
 
                     final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index())
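The loop rewrite above is the recurring replacement for the removed `docMappers(true)`: since a `MapperService` now wraps at most one user type plus `_default_`, iteration collapses to a fixed two-element list with a null check. The same shape repeats in the hunks below:

    // at most two mappers remain: the single active type and _default_
    for (DocumentMapper m : Arrays.asList(mapperService.documentMapper(),
            mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) {
        if (m != null) {
            // use m.type(), m.mappingSource(), ...
        }
    }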
@@ -19,9 +19,7 @@
 
 package org.elasticsearch.cluster.metadata;
 
-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
@@ -49,6 +47,7 @@ import org.elasticsearch.indices.InvalidTypeNameException;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -175,10 +174,13 @@ public class MetaDataMappingService extends AbstractComponent {
                 String index = indexService.index().getName();
                 try {
                     List<String> updatedTypes = new ArrayList<>();
-                    for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
-                        final String type = mapper.type();
-                        if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
-                            updatedTypes.add(type);
+                    MapperService mapperService = indexService.mapperService();
+                    for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) {
+                        if (mapper != null) {
+                            final String type = mapper.type();
+                            if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
+                                updatedTypes.add(type);
+                            }
                         }
                     }
 
@@ -186,8 +188,10 @@ public class MetaDataMappingService extends AbstractComponent {
                     if (updatedTypes.isEmpty() == false) {
                         logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes);
                         dirty = true;
-                        for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
-                            builder.putMapping(new MappingMetaData(mapper));
+                        for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) {
+                            if (mapper != null) {
+                                builder.putMapping(new MappingMetaData(mapper));
+                            }
                         }
                     }
                 } catch (Exception e) {
@@ -320,8 +324,10 @@ public class MetaDataMappingService extends AbstractComponent {
                 IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
                 // Mapping updates on a single type may have side-effects on other types so we need to
                 // update mapping metadata on all types
-                for (DocumentMapper mapper : mapperService.docMappers(true)) {
-                    indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource()));
+                for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) {
+                    if (mapper != null) {
+                        indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource()));
+                    }
                 }
                 builder.put(indexMetaDataBuilder);
             }
@@ -24,9 +24,14 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
 import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
 import org.apache.lucene.util.SloppyMath;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.index.fielddata.FieldData;
 import org.elasticsearch.index.fielddata.GeoPointValues;
@@ -36,6 +41,7 @@ import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
 import org.elasticsearch.index.fielddata.SortingNumericDoubleValues;
 
 import java.io.IOException;
+import java.io.InputStream;
 
 public class GeoUtils {
 
@@ -351,6 +357,36 @@ public class GeoUtils {
         return parseGeoPoint(parser, point, false);
     }
 
+    /**
+     * Parses the value as a geopoint. The following types of values are supported:
+     * <p>
+     * Object: has to contain either lat and lon or geohash fields
+     * <p>
+     * String: expected to be in "latitude, longitude" format or a geohash
+     * <p>
+     * Array: two or more elements, the first element is longitude, the second is latitude, the rest is ignored if ignoreZValue is true
+     */
+    public static GeoPoint parseGeoPoint(Object value, final boolean ignoreZValue) throws ElasticsearchParseException {
+        try {
+            XContentBuilder content = JsonXContent.contentBuilder();
+            content.startObject();
+            content.field("null_value", value);
+            content.endObject();
+
+            try (InputStream stream = BytesReference.bytes(content).streamInput();
+                     XContentParser parser = JsonXContent.jsonXContent.createParser(
+                             NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) {
+                parser.nextToken(); // start object
+                parser.nextToken(); // field name
+                parser.nextToken(); // field value
+                return parseGeoPoint(parser, new GeoPoint(), ignoreZValue);
+            }
+
+        } catch (IOException ex) {
+            throw new ElasticsearchParseException("error parsing geopoint", ex);
+        }
+    }
+
     /**
     * Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms:
     *
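A usage sketch for the new overload; the coordinate values are illustrative, and the supported input shapes are exactly the ones listed in the javadoc above:

    GeoPoint fromString = GeoUtils.parseGeoPoint("41.12,-71.34", false);               // "lat, lon" string or a geohash
    GeoPoint fromArray  = GeoUtils.parseGeoPoint(Arrays.asList(-71.34, 41.12), false); // array: [lon, lat]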
@@ -697,8 +697,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
         if (indexSettings.getTranslogDurability() == Translog.Durability.ASYNC) {
             for (IndexShard shard : this.shards.values()) {
                 try {
-                    Translog translog = shard.getTranslog();
-                    if (translog.syncNeeded()) {
+                    if (shard.isSyncNeeded()) {
                         shard.sync();
                     }
                 } catch (AlreadyClosedException ex) {
@@ -121,7 +121,8 @@ public final class IndexWarmer extends AbstractComponent {
         public TerminationHandle warmReader(final IndexShard indexShard, final Engine.Searcher searcher) {
             final MapperService mapperService = indexShard.mapperService();
             final Map<String, MappedFieldType> warmUpGlobalOrdinals = new HashMap<>();
-            for (DocumentMapper docMapper : mapperService.docMappers(false)) {
+            DocumentMapper docMapper = mapperService.documentMapper();
+            if (docMapper != null) {
                 for (FieldMapper fieldMapper : docMapper.mappers()) {
                     final MappedFieldType fieldType = fieldMapper.fieldType();
                     final String indexName = fieldType.name();
@@ -233,7 +233,8 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements I
         boolean hasNested = false;
         final Set<Query> warmUp = new HashSet<>();
         final MapperService mapperService = indexShard.mapperService();
-        for (DocumentMapper docMapper : mapperService.docMappers(false)) {
+        DocumentMapper docMapper = mapperService.documentMapper();
+        if (docMapper != null) {
             if (docMapper.hasNestedObjects()) {
                 hasNested = true;
                 for (ObjectMapper objectMapper : docMapper.objectMappers().values()) {
@@ -66,6 +66,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogStats;
 
 import java.io.Closeable;
 import java.io.FileNotFoundException;
@@ -510,8 +511,18 @@ public abstract class Engine implements Closeable {
         EXTERNAL, INTERNAL
     }
 
-    /** returns the translog for this engine */
-    public abstract Translog getTranslog();
+    /**
+     * Returns the translog associated with this engine.
+     * Prefer to keep the translog package-private, so that an engine can control all accesses to the translog.
+     */
+    abstract Translog getTranslog();
+
+    /**
+     * Checks if the underlying storage sync is required.
+     */
+    public boolean isTranslogSyncNeeded() {
+        return getTranslog().syncNeeded();
+    }
 
     /**
      * Ensures that all locations in the given stream have been written to the underlying storage.
@@ -520,6 +531,36 @@ public abstract class Engine implements Closeable {
 
     public abstract void syncTranslog() throws IOException;
 
+    public Closeable acquireTranslogRetentionLock() {
+        return getTranslog().acquireRetentionLock();
+    }
+
+    /**
+     * Creates a new translog snapshot from this engine for reading translog operations whose seq# in the provided range.
+     * The caller has to close the returned snapshot after finishing the reading.
+     */
+    public Translog.Snapshot newTranslogSnapshotBetween(long minSeqNo, long maxSeqNo) throws IOException {
+        return getTranslog().getSnapshotBetween(minSeqNo, maxSeqNo);
+    }
+
+    /**
+     * Returns the estimated number of translog operations in this engine whose seq# at least the provided seq#.
+     */
+    public int estimateTranslogOperationsFromMinSeq(long minSeqNo) {
+        return getTranslog().estimateTotalOperationsFromMinSeq(minSeqNo);
+    }
+
+    public TranslogStats getTranslogStats() {
+        return getTranslog().stats();
+    }
+
+    /**
+     * Returns the last location that the translog of this engine has written into.
+     */
+    public Translog.Location getTranslogLastWriteLocation() {
+        return getTranslog().getLastWriteLocation();
+    }
+
     protected final void ensureOpen(Exception suppressed) {
         if (isClosed.get()) {
             AlreadyClosedException ace = new AlreadyClosedException(shardId + " engine is closed", failedEngine.get());
@@ -548,6 +589,13 @@ public abstract class Engine implements Closeable {
      */
     public abstract LocalCheckpointTracker getLocalCheckpointTracker();
 
+    /**
+     * Returns the latest global checkpoint value that has been persisted in the underlying storage (i.e. translog's checkpoint)
+     */
+    public long getLastSyncedGlobalCheckpoint() {
+        return getTranslog().getLastSyncedGlobalCheckpoint();
+    }
+
     /**
      * Global stats on segments.
      */
@@ -812,6 +860,16 @@ public abstract class Engine implements Closeable {
      */
     public abstract void trimTranslog() throws EngineException;
 
+    /**
+     * Tests whether or not the translog generation should be rolled to a new generation.
+     * This test is based on the size of the current generation compared to the configured generation threshold size.
+     *
+     * @return {@code true} if the current generation should be rolled to a new generation
+     */
+    public boolean shouldRollTranslogGeneration() {
+        return getTranslog().shouldRollGeneration();
+    }
+
     /**
      * Rolls the translog generation and cleans unneeded.
      */
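Call sites migrate from reaching through `getTranslog()` to these engine-level wrappers, as the `IndexService` hunk earlier in this diff already shows. The general pattern, using only methods introduced above:

    // before: if (engine.getTranslog().syncNeeded()) { engine.getTranslog().sync(); }
    if (engine.isTranslogSyncNeeded()) {
        engine.syncTranslog();
    }
    TranslogStats stats = engine.getTranslogStats(); // instead of engine.getTranslog().stats()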
@@ -426,7 +426,7 @@ public class InternalEngine extends Engine {
     }
 
     @Override
-    public Translog getTranslog() {
+    Translog getTranslog() {
         ensureOpen();
         return translog;
     }
@@ -631,23 +631,9 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
             return values[index].get().utf8ToString();
         }
 
-        public BytesRef getBytesValue() {
-            if (size() > 0) {
-                return values[0].get();
-            } else {
-                return null;
-            }
-        }
-
         public String getValue() {
-            BytesRef value = getBytesValue();
-            if (value == null) {
-                return null;
-            } else {
-                return value.utf8ToString();
-            }
+            return count == 0 ? null : get(0);
         }
 
     }
 
     public static final class BytesRefs extends BinaryScriptDocValues<BytesRef> {
@@ -658,14 +644,16 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
 
         @Override
         public BytesRef get(int index) {
-            return values[index].get();
+            /**
+             * We need to make a copy here because {@link BinaryScriptDocValues} might reuse the
+             * returned value and the same instance might be used to
+             * return values from multiple documents.
+             **/
+            return values[index].toBytesRef();
         }
 
         public BytesRef getValue() {
-            if (count == 0) {
-                return new BytesRef();
-            }
-            return values[0].get();
+            return count == 0 ? new BytesRef() : get(0);
         }
 
     }
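The copy matters because binary doc-values readers may reuse one backing buffer across documents, so handing out `values[index].get()` lets a later document overwrite an earlier return value. `values[index]` is a `BytesRefBuilder`, whose `toBytesRef()` materializes an independent copy; the same idiom in isolation (illustrative variable names):

    BytesRefBuilder scratch = new BytesRefBuilder();
    scratch.copyBytes(reusedValue);       // reusedValue: a buffer the reader may overwrite later
    BytesRef safe = scratch.toBytesRef(); // independent copy, safe to hand out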
@@ -60,6 +60,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
     public static class Names {
         public static final String IGNORE_MALFORMED = "ignore_malformed";
         public static final ParseField IGNORE_Z_VALUE = new ParseField("ignore_z_value");
+        public static final String NULL_VALUE = "null_value";
     }
 
     public static class Defaults {
@@ -134,7 +135,8 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
                 throws MapperParsingException {
             Builder builder = new GeoPointFieldMapper.Builder(name);
             parseField(builder, name, node, parserContext);
+            Object nullValue = null;
             for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                 Map.Entry<String, Object> entry = iterator.next();
                 String propName = entry.getKey();
@@ -147,9 +148,31 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
                     builder.ignoreZValue(XContentMapValues.nodeBooleanValue(propNode,
                         name + "." + Names.IGNORE_Z_VALUE.getPreferredName()));
                     iterator.remove();
+                } else if (propName.equals(Names.NULL_VALUE)) {
+                    if (propNode == null) {
+                        throw new MapperParsingException("Property [null_value] cannot be null.");
+                    }
+                    nullValue = propNode;
+                    iterator.remove();
                 }
             }
 
+            if (nullValue != null) {
+                boolean ignoreZValue = builder.ignoreZValue == null ? Defaults.IGNORE_Z_VALUE.value() : builder.ignoreZValue;
+                boolean ignoreMalformed = builder.ignoreMalformed == null ? Defaults.IGNORE_MALFORMED.value() : builder.ignoreMalformed;
+                GeoPoint point = GeoUtils.parseGeoPoint(nullValue, ignoreZValue);
+                if (ignoreMalformed == false) {
+                    if (point.lat() > 90.0 || point.lat() < -90.0) {
+                        throw new IllegalArgumentException("illegal latitude value [" + point.lat() + "]");
+                    }
+                    if (point.lon() > 180.0 || point.lon() < -180) {
+                        throw new IllegalArgumentException("illegal longitude value [" + point.lon() + "]");
+                    }
+                } else {
+                    GeoUtils.normalizePoint(point);
+                }
+                builder.nullValue(point);
+            }
             return builder;
         }
     }
@@ -318,7 +341,11 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
                 }
             } else if (token == XContentParser.Token.VALUE_STRING) {
                 parse(context, sparse.resetFromString(context.parser().text(), ignoreZValue.value()));
-            } else if (token != XContentParser.Token.VALUE_NULL) {
+            } else if (token == XContentParser.Token.VALUE_NULL) {
+                if (fieldType.nullValue() != null) {
+                    parse(context, (GeoPoint) fieldType.nullValue());
+                }
+            } else {
                 try {
                     parse(context, GeoUtils.parseGeoPoint(context.parser(), sparse));
                 } catch (ElasticsearchParseException e) {
@@ -337,11 +364,15 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
     protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
         super.doXContentBody(builder, includeDefaults, params);
         if (includeDefaults || ignoreMalformed.explicit()) {
-            builder.field(GeoPointFieldMapper.Names.IGNORE_MALFORMED, ignoreMalformed.value());
+            builder.field(Names.IGNORE_MALFORMED, ignoreMalformed.value());
         }
         if (includeDefaults || ignoreZValue.explicit()) {
             builder.field(Names.IGNORE_Z_VALUE.getPreferredName(), ignoreZValue.value());
         }
+
+        if (includeDefaults || fieldType().nullValue() != null) {
+            builder.field(Names.NULL_VALUE, fieldType().nullValue());
+        }
     }
 
     public Explicit<Boolean> ignoreZValue() {
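A mapping that exercises the new parameter, built with `XContentBuilder` (a sketch; the field name and coordinates are illustrative):

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    XContentBuilder mapping = XContentFactory.jsonBuilder()
        .startObject()
            .startObject("properties")
                .startObject("location")
                    .field("type", "geo_point")
                    .field("null_value", "40.0,-70.0") // parsed via the new GeoUtils.parseGeoPoint(Object, boolean)
                .endObject()
            .endObject()
        .endObject();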
@@ -57,6 +57,7 @@ import org.elasticsearch.indices.mapper.MapperRegistry;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -64,13 +65,12 @@ import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.function.Function;
 import java.util.function.Supplier;
-import java.util.stream.Collectors;
 
 import static java.util.Collections.emptyMap;
-import static java.util.Collections.emptySet;
 import static java.util.Collections.unmodifiableMap;
 
 public class MapperService extends AbstractIndexComponent implements Closeable {
@@ -121,7 +121,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
 
     private volatile String defaultMappingSource;
 
-    private volatile Map<String, DocumentMapper> mappers = emptyMap();
+    private volatile DocumentMapper mapper;
+    private volatile DocumentMapper defaultMapper;
 
     private volatile FieldTypeLookup fieldTypes;
     private volatile Map<String, ObjectMapper> fullPathObjectMappers = emptyMap();
@@ -166,24 +167,6 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         return this.hasNested;
     }
 
-    /**
-     * returns an immutable iterator over current document mappers.
-     *
-     * @param includingDefaultMapping indicates whether the iterator should contain the {@link #DEFAULT_MAPPING} document mapper.
-     *                                As is this not really an active type, you would typically set this to false
-     */
-    public Iterable<DocumentMapper> docMappers(final boolean includingDefaultMapping) {
-        return () -> {
-            final Collection<DocumentMapper> documentMappers;
-            if (includingDefaultMapping) {
-                documentMappers = mappers.values();
-            } else {
-                documentMappers = mappers.values().stream().filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type())).collect(Collectors.toList());
-            }
-            return Collections.unmodifiableCollection(documentMappers).iterator();
-        };
-    }
-
     public IndexAnalyzers getIndexAnalyzers() {
         return this.indexAnalyzers;
     }
@@ -212,7 +195,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     public boolean updateMapping(IndexMetaData indexMetaData) throws IOException {
         assert indexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + indexMetaData.getIndex();
         // go over and add the relevant mappings (or update them)
-        final Set<String> existingMappers = new HashSet<>(mappers.keySet());
+        Set<String> existingMappers = new HashSet<>();
+        if (mapper != null) {
+            existingMappers.add(mapper.type());
+        }
+        if (defaultMapper != null) {
+            existingMappers.add(DEFAULT_MAPPING);
+        }
         final Map<String, DocumentMapper> updatedEntries;
         try {
             // only update entries if needed
@@ -314,29 +303,32 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             defaultMappingSourceOrLastStored = this.defaultMappingSource;
         }
 
-        List<DocumentMapper> documentMappers = new ArrayList<>();
+        DocumentMapper documentMapper = null;
         for (Map.Entry<String, CompressedXContent> entry : mappings.entrySet()) {
             String type = entry.getKey();
             if (type.equals(DEFAULT_MAPPING)) {
                 continue;
             }
 
+            if (documentMapper != null) {
+                throw new IllegalArgumentException("Cannot put multiple mappings: " + mappings.keySet());
+            }
+
             final boolean applyDefault =
                 // the default was already applied if we are recovering
                 reason != MergeReason.MAPPING_RECOVERY
                 // only apply the default mapping if we don't have the type yet
-                && mappers.containsKey(type) == false;
+                && this.mapper == null;
 
             try {
-                DocumentMapper documentMapper =
+                documentMapper =
                     documentParser.parse(type, entry.getValue(), applyDefault ? defaultMappingSourceOrLastStored : null);
-                documentMappers.add(documentMapper);
             } catch (Exception e) {
                 throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
             }
         }
 
-        return internalMerge(defaultMapper, defaultMappingSource, documentMappers, reason);
+        return internalMerge(defaultMapper, defaultMappingSource, documentMapper, reason);
     }
 
     static void validateTypeName(String type) {
@@ -361,13 +353,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     }
 
     private synchronized Map<String, DocumentMapper> internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource,
-                                                                   List<DocumentMapper> documentMappers, MergeReason reason) {
+                                                                   DocumentMapper mapper, MergeReason reason) {
         boolean hasNested = this.hasNested;
         Map<String, ObjectMapper> fullPathObjectMappers = this.fullPathObjectMappers;
         FieldTypeLookup fieldTypes = this.fieldTypes;
-        Map<String, DocumentMapper> mappers = new HashMap<>(this.mappers);
 
-        Map<String, DocumentMapper> results = new LinkedHashMap<>(documentMappers.size() + 1);
+        Map<String, DocumentMapper> results = new LinkedHashMap<>(2);
 
         if (defaultMapper != null) {
             if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) {
@@ -378,27 +369,23 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
                     "cannot have more than one type");
             }
             assert defaultMapper.type().equals(DEFAULT_MAPPING);
-            mappers.put(DEFAULT_MAPPING, defaultMapper);
             results.put(DEFAULT_MAPPING, defaultMapper);
         }
 
         {
-            Set<String> actualTypes = new HashSet<>(mappers.keySet());
-            documentMappers.forEach(mapper -> actualTypes.add(mapper.type()));
-            actualTypes.remove(DEFAULT_MAPPING);
-            if (actualTypes.size() > 1) {
+            if (mapper != null && this.mapper != null && Objects.equals(this.mapper.type(), mapper.type()) == false) {
                 throw new IllegalArgumentException(
-                    "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + actualTypes);
+                    "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + Arrays.asList(this.mapper.type(), mapper.type()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for (DocumentMapper mapper : documentMappers) {
|
DocumentMapper newMapper = null;
|
||||||
|
if (mapper != null) {
|
||||||
// check naming
|
// check naming
|
||||||
validateTypeName(mapper.type());
|
validateTypeName(mapper.type());
|
||||||
|
|
||||||
// compute the merged DocumentMapper
|
// compute the merged DocumentMapper
|
||||||
DocumentMapper oldMapper = mappers.get(mapper.type());
|
DocumentMapper oldMapper = this.mapper;
|
||||||
DocumentMapper newMapper;
|
|
||||||
if (oldMapper != null) {
|
if (oldMapper != null) {
|
||||||
newMapper = oldMapper.merge(mapper.mapping());
|
newMapper = oldMapper.merge(mapper.mapping());
|
||||||
} else {
|
} else {
|
||||||
|
@ -442,7 +429,6 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
||||||
}
|
}
|
||||||
|
|
||||||
results.put(newMapper.type(), newMapper);
|
results.put(newMapper.type(), newMapper);
|
||||||
mappers.put(newMapper.type(), newMapper);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (reason == MergeReason.MAPPING_UPDATE) {
|
if (reason == MergeReason.MAPPING_UPDATE) {
|
||||||
|
@ -456,24 +442,16 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
||||||
}
|
}
|
||||||
checkIndexSortCompatibility(indexSettings.getIndexSortConfig(), hasNested);
|
checkIndexSortCompatibility(indexSettings.getIndexSortConfig(), hasNested);
|
||||||
|
|
||||||
for (Map.Entry<String, DocumentMapper> entry : mappers.entrySet()) {
|
if (newMapper != null) {
|
||||||
if (entry.getKey().equals(DEFAULT_MAPPING)) {
|
DocumentMapper updatedDocumentMapper = newMapper.updateFieldType(fieldTypes.fullNameToFieldType);
|
||||||
continue;
|
if (updatedDocumentMapper != newMapper) {
|
||||||
}
|
|
||||||
DocumentMapper documentMapper = entry.getValue();
|
|
||||||
// apply changes to the field types back
|
|
||||||
DocumentMapper updatedDocumentMapper = documentMapper.updateFieldType(fieldTypes.fullNameToFieldType);
|
|
||||||
if (updatedDocumentMapper != documentMapper) {
|
|
||||||
// update both mappers and result
|
// update both mappers and result
|
||||||
entry.setValue(updatedDocumentMapper);
|
newMapper = updatedDocumentMapper;
|
||||||
if (results.containsKey(updatedDocumentMapper.type())) {
|
results.put(updatedDocumentMapper.type(), updatedDocumentMapper);
|
||||||
results.put(updatedDocumentMapper.type(), updatedDocumentMapper);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// make structures immutable
|
// make structures immutable
|
||||||
mappers = Collections.unmodifiableMap(mappers);
|
|
||||||
results = Collections.unmodifiableMap(results);
|
results = Collections.unmodifiableMap(results);
|
||||||
|
|
||||||
// only need to immutably rewrap these if the previous reference was changed.
|
// only need to immutably rewrap these if the previous reference was changed.
|
||||||
|
@ -486,7 +464,10 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
||||||
if (defaultMappingSource != null) {
|
if (defaultMappingSource != null) {
|
||||||
this.defaultMappingSource = defaultMappingSource;
|
this.defaultMappingSource = defaultMappingSource;
|
||||||
}
|
}
|
||||||
this.mappers = mappers;
|
if (newMapper != null) {
|
||||||
|
this.mapper = newMapper;
|
||||||
|
}
|
||||||
|
this.defaultMapper = defaultMapper;
|
||||||
this.fieldTypes = fieldTypes;
|
this.fieldTypes = fieldTypes;
|
||||||
this.hasNested = hasNested;
|
this.hasNested = hasNested;
|
||||||
this.fullPathObjectMappers = fullPathObjectMappers;
|
this.fullPathObjectMappers = fullPathObjectMappers;
|
||||||
|
@ -498,7 +479,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
||||||
}
|
}
|
||||||
|
|
||||||
private boolean assertMappersShareSameFieldType() {
|
private boolean assertMappersShareSameFieldType() {
|
||||||
for (DocumentMapper mapper : docMappers(false)) {
|
if (mapper != null) {
|
||||||
List<FieldMapper> fieldMappers = new ArrayList<>();
|
List<FieldMapper> fieldMappers = new ArrayList<>();
|
||||||
Collections.addAll(fieldMappers, mapper.mapping().metadataMappers);
|
Collections.addAll(fieldMappers, mapper.mapping().metadataMappers);
|
||||||
MapperUtils.collect(mapper.root(), new ArrayList<>(), fieldMappers);
|
MapperUtils.collect(mapper.root(), new ArrayList<>(), fieldMappers);
|
||||||
|
@ -692,18 +673,20 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
||||||
return documentParser.parse(mappingType, mappingSource, applyDefault ? defaultMappingSource : null);
|
return documentParser.parse(mappingType, mappingSource, applyDefault ? defaultMappingSource : null);
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean hasMapping(String mappingType) {
|
/**
|
||||||
return mappers.containsKey(mappingType);
|
* Get the set of types.
|
||||||
|
* @deprecated Indices may have one type at most, use {@link #documentMapper()} instead.
|
||||||
|
*/
|
||||||
|
@Deprecated
|
||||||
|
public Set<String> types() {
|
||||||
|
return mapper == null ? Collections.emptySet() : Collections.singleton(mapper.type());
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return the set of concrete types that have a mapping.
|
* Return the document mapper, or {@code null} if no mapping has been put yet.
|
||||||
* NOTE: this does not return the default mapping.
|
|
||||||
*/
|
*/
|
||||||
public Collection<String> types() {
|
public DocumentMapper documentMapper() {
|
||||||
final Set<String> types = new HashSet<>(mappers.keySet());
|
return mapper;
|
||||||
types.remove(DEFAULT_MAPPING);
|
|
||||||
return Collections.unmodifiableSet(types);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -712,7 +695,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
||||||
* the default mapping.
|
* the default mapping.
|
||||||
*/
|
*/
|
||||||
public DocumentMapper documentMapper(String type) {
|
public DocumentMapper documentMapper(String type) {
|
||||||
return mappers.get(type);
|
if (mapper != null && type.equals(mapper.type())) {
|
||||||
|
return mapper;
|
||||||
|
}
|
||||||
|
if (DEFAULT_MAPPING.equals(type)) {
|
||||||
|
return defaultMapper;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -720,7 +709,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
||||||
* type has been dynamically created.
|
* type has been dynamically created.
|
||||||
*/
|
*/
|
||||||
public DocumentMapperForType documentMapperWithAutoCreate(String type) {
|
public DocumentMapperForType documentMapperWithAutoCreate(String type) {
|
||||||
DocumentMapper mapper = mappers.get(type);
|
DocumentMapper mapper = documentMapper(type);
|
||||||
if (mapper != null) {
|
if (mapper != null) {
|
||||||
return new DocumentMapperForType(mapper, null);
|
return new DocumentMapperForType(mapper, null);
|
||||||
}
|
}
|
||||||
|
@ -836,7 +825,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
||||||
|
|
||||||
/** Return a term that uniquely identifies the document, or {@code null} if the type is not allowed. */
|
/** Return a term that uniquely identifies the document, or {@code null} if the type is not allowed. */
|
||||||
public Term createUidTerm(String type, String id) {
|
public Term createUidTerm(String type, String id) {
|
||||||
if (hasMapping(type) == false) {
|
if (mapper == null || mapper.type().equals(type) == false) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
return new Term(IdFieldMapper.NAME, Uid.encodeId(id));
|
return new Term(IdFieldMapper.NAME, Uid.encodeId(id));
|
||||||
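
With MapperService now wrapping a single DocumentMapper, call sites stop iterating docMappers(...) or types() and switch to a null-checked lookup; the QueryParserHelper hunk below is one such migration. A minimal caller-side sketch, assuming a MapperService instance named mapperService (the variable is illustrative; the methods are the ones introduced above):

    DocumentMapper mapper = mapperService.documentMapper(); // null until a mapping has been put
    if (mapper != null) {
        String type = mapper.type(); // the single concrete type of the index
        // work with mapper.mapping(), mapper.mappers(), etc.
    }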
@@ -89,7 +89,8 @@ public final class QueryParserHelper {
      * @param field The field name to search.
      */
     public static FieldMapper getFieldMapper(MapperService mapperService, String field) {
-        for (DocumentMapper mapper : mapperService.docMappers(true)) {
+        DocumentMapper mapper = mapperService.documentMapper();
+        if (mapper != null) {
             FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper(field);
             if (fieldMapper != null) {
                 return fieldMapper;
@@ -131,7 +131,7 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction<

     private void maybeSyncTranslog(final IndexShard indexShard) throws IOException {
         if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST &&
-            indexShard.getTranslog().getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
+            indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
             indexShard.sync();
         }
     }
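
Restated for clarity: the shard only forces an fsync when it persists per request and the persisted global checkpoint lags the in-memory one. The same guard, expanded into a local variable (identifiers exactly as in maybeSyncTranslog above):

    boolean syncNeeded =
        indexShard.getTranslogDurability() == Translog.Durability.REQUEST
            && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint();
    if (syncNeeded) {
        indexShard.sync(); // fsyncs the translog, persisting the checkpoint
    }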
@@ -933,7 +933,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     public TranslogStats translogStats() {
-        return getEngine().getTranslog().stats();
+        return getEngine().getTranslogStats();
     }

     public CompletionStats completionStats(String... fields) {
@@ -1330,7 +1330,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     protected void onNewEngine(Engine newEngine) {
-        refreshListeners.setTranslog(newEngine.getTranslog());
+        refreshListeners.setCurrentRefreshLocationSupplier(newEngine::getTranslogLastWriteLocation);
     }

     /**
@@ -1562,8 +1562,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         final Engine engine = getEngineOrNull();
         if (engine != null) {
             try {
-                final Translog translog = engine.getTranslog();
-                return translog.shouldRollGeneration();
+                return engine.shouldRollTranslogGeneration();
             } catch (final AlreadyClosedException e) {
                 // we are already closed, no need to flush or roll
             }
@@ -1578,9 +1577,30 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         }
     }

+    /**
+     * Acquires a lock on the translog files, preventing them from being trimmed.
+     */
     public Closeable acquireTranslogRetentionLock() {
-        Engine engine = getEngine();
-        return engine.getTranslog().acquireRetentionLock();
+        return getEngine().acquireTranslogRetentionLock();
     }
+
+    /**
+     * Creates a new translog snapshot for reading translog operations whose seq# at least the provided seq#.
+     * The caller has to close the returned snapshot after finishing the reading.
+     */
+    public Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException {
+        return newTranslogSnapshotBetween(minSeqNo, Long.MAX_VALUE);
+    }
+
+    public Translog.Snapshot newTranslogSnapshotBetween(long minSeqNo, long maxSeqNo) throws IOException {
+        return getEngine().newTranslogSnapshotBetween(minSeqNo, maxSeqNo);
+    }
+
+    /**
+     * Returns the estimated number of operations in translog whose seq# at least the provided seq#.
+     */
+    public int estimateTranslogOperationsFromMinSeq(long minSeqNo) {
+        return getEngine().estimateTranslogOperationsFromMinSeq(minSeqNo);
+    }

     public List<Segment> segments(boolean verbose) {
@@ -1591,10 +1611,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         getEngine().flushAndClose();
     }

-    public Translog getTranslog() {
-        return getEngine().getTranslog();
-    }
-
     public String getHistoryUUID() {
         return getEngine().getHistoryUUID();
     }
@@ -1732,6 +1748,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         return replicationTracker.getGlobalCheckpoint();
     }

+    /**
+     * Returns the latest global checkpoint value that has been persisted in the underlying storage (i.e. translog's checkpoint)
+     */
+    public long getLastSyncedGlobalCheckpoint() {
+        return getEngine().getLastSyncedGlobalCheckpoint();
+    }
+
     /**
      * Get the local knowledge of the global checkpoints for all in-sync allocation IDs.
      *
@@ -2307,6 +2330,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         getEngine().syncTranslog();
     }

+    /**
+     * Checks if the underlying storage sync is required.
+     */
+    public boolean isSyncNeeded() {
+        return getEngine().isTranslogSyncNeeded();
+    }
+
     /**
      * Returns the current translog durability mode
      */
@@ -2466,7 +2496,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     private void setRefreshPending(Engine engine) {
-        Translog.Location lastWriteLocation = engine.getTranslog().getLastWriteLocation();
+        Translog.Location lastWriteLocation = engine.getTranslogLastWriteLocation();
         Translog.Location location;
         do {
             location = this.pendingRefreshLocation.get();
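
Taken together, these IndexShard changes drop the public getTranslog() escape hatch; outside consumers now hold a retention lock and read operations through shard-level methods that delegate to the engine. A usage sketch built only from the methods added above (the shard and minSeqNo variables are assumptions):

    try (Closeable ignored = shard.acquireTranslogRetentionLock(); // keeps generations from being trimmed
         Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(minSeqNo)) {
        Translog.Operation op;
        while ((op = snapshot.next()) != null) {
            // replay or inspect op
        }
    }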
@@ -83,7 +83,8 @@ public class PrimaryReplicaSyncer extends AbstractComponent {
         ActionListener<ResyncTask> resyncListener = null;
         try {
             final long startingSeqNo = indexShard.getGlobalCheckpoint() + 1;
-            Translog.Snapshot snapshot = indexShard.getTranslog().newSnapshotFrom(startingSeqNo);
+            Translog.Snapshot snapshot = indexShard.newTranslogSnapshotFromMinSeqNo(startingSeqNo);
             resyncListener = new ActionListener<ResyncTask>() {
                 @Override
                 public void onResponse(final ResyncTask resyncTask) {
@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.concurrent.Executor;
 import java.util.function.Consumer;
 import java.util.function.IntSupplier;
+import java.util.function.Supplier;

 import static java.util.Objects.requireNonNull;
@@ -153,21 +154,20 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener,
     /**
      * Setup the translog used to find the last refreshed location.
      */
-    public void setTranslog(Translog translog) {
-        this.translog = translog;
+    public void setCurrentRefreshLocationSupplier(Supplier<Translog.Location> currentRefreshLocationSupplier) {
+        this.currentRefreshLocationSupplier = currentRefreshLocationSupplier;
     }

-    // Implementation of ReferenceManager.RefreshListener that adapts Lucene's RefreshListener into Elasticsearch's refresh listeners.
-    private Translog translog;
     /**
      * Snapshot of the translog location before the current refresh if there is a refresh going on or null. Doesn't have to be volatile
      * because when it is used by the refreshing thread.
      */
     private Translog.Location currentRefreshLocation;
+    private Supplier<Translog.Location> currentRefreshLocationSupplier;

     @Override
     public void beforeRefresh() throws IOException {
-        currentRefreshLocation = translog.getLastWriteLocation();
+        currentRefreshLocation = currentRefreshLocationSupplier.get();
     }

     @Override
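
The listener no longer holds a Translog reference; it pulls the pre-refresh location through a Supplier, leaving the engine as the translog's sole owner. The wiring shown in IndexShard.onNewEngine above, expanded from a method reference into an equivalent lambda:

    refreshListeners.setCurrentRefreshLocationSupplier(
        () -> newEngine.getTranslogLastWriteLocation());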
@@ -145,9 +145,6 @@ public class RecoverySourceHandler {
         }, shardId + " validating recovery target ["+ request.targetAllocationId() + "] registered ");

         try (Closeable ignored = shard.acquireTranslogRetentionLock()) {
-
-            final Translog translog = shard.getTranslog();
-
             final long startingSeqNo;
             final long requiredSeqNoRangeStart;
             final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO &&
@@ -170,7 +167,7 @@ public class RecoverySourceHandler {
                 requiredSeqNoRangeStart =
                     Long.parseLong(phase1Snapshot.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1;
                 try {
-                    phase1(phase1Snapshot.getIndexCommit(), translog::totalOperations);
+                    phase1(phase1Snapshot.getIndexCommit(), () -> shard.estimateTranslogOperationsFromMinSeq(startingSeqNo));
                 } catch (final Exception e) {
                     throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e);
                 } finally {
@@ -187,7 +184,7 @@ public class RecoverySourceHandler {

             try {
                 // For a sequence based recovery, the target can keep its local translog
-                prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, translog.estimateTotalOperationsFromMinSeq(startingSeqNo));
+                prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, shard.estimateTranslogOperationsFromMinSeq(startingSeqNo));
             } catch (final Exception e) {
                 throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e);
             }
@@ -210,9 +207,9 @@ public class RecoverySourceHandler {

             logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo);

-            logger.trace("snapshot translog for recovery; current size is [{}]", translog.estimateTotalOperationsFromMinSeq(startingSeqNo));
+            logger.trace("snapshot translog for recovery; current size is [{}]", shard.estimateTranslogOperationsFromMinSeq(startingSeqNo));
             final long targetLocalCheckpoint;
-            try(Translog.Snapshot snapshot = translog.newSnapshotFrom(startingSeqNo)) {
+            try(Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) {
                 targetLocalCheckpoint = phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, snapshot);
             } catch (Exception e) {
                 throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e);
@@ -261,7 +258,7 @@ public class RecoverySourceHandler {
         // the start recovery request is initialized with the starting sequence number set to the target shard's local checkpoint plus one
         if (startingSeqNo - 1 <= localCheckpoint) {
             final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1);
-            try (Translog.Snapshot snapshot = shard.getTranslog().newSnapshotFrom(startingSeqNo)) {
+            try (Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) {
                 Translog.Operation operation;
                 while ((operation = snapshot.next()) != null) {
                     if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
@@ -48,7 +48,7 @@ public abstract class ExecutorBuilder<U extends ExecutorBuilder.ExecutorSettings
     }

     protected int applyHardSizeLimit(final Settings settings, final String name) {
-        if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {
+        if (name.equals(ThreadPool.Names.BULK)) {
             return 1 + EsExecutors.numberOfProcessors(settings);
         } else {
             return Integer.MAX_VALUE;
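
With the index pool gone, only the bulk pool keeps the hard size cap. A sketch of the resulting behavior; since applyHardSizeLimit is protected, this would have to run inside an ExecutorBuilder subclass (a hypothetical call site, not part of this diff):

    int bulkCap = applyHardSizeLimit(settings, ThreadPool.Names.BULK); // 1 + number of processors
    int getCap = applyHardSizeLimit(settings, ThreadPool.Names.GET);   // Integer.MAX_VALUE, i.e. uncapped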
@@ -49,20 +49,7 @@ public final class FixedExecutorBuilder extends ExecutorBuilder<FixedExecutorBui
      * @param queueSize the size of the backing queue, -1 for unbounded
      */
     FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize) {
-        this(settings, name, size, queueSize, false);
-    }
-
-    /**
-     * Construct a fixed executor builder; the settings will have the key prefix "thread_pool." followed by the executor name.
-     *
-     * @param settings   the node-level settings
-     * @param name       the name of the executor
-     * @param size       the fixed number of threads
-     * @param queueSize  the size of the backing queue, -1 for unbounded
-     * @param deprecated whether or not the thread pool is deprecated
-     */
-    FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize, final boolean deprecated) {
-        this(settings, name, size, queueSize, "thread_pool." + name, deprecated);
+        this(settings, name, size, queueSize, "thread_pool." + name);
     }

     /**
@@ -75,41 +62,16 @@ public final class FixedExecutorBuilder extends ExecutorBuilder<FixedExecutorBui
      * @param prefix the prefix for the settings keys
      */
     public FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize, final String prefix) {
-        this(settings, name, size, queueSize, prefix, false);
-    }
-
-    /**
-     * Construct a fixed executor builder.
-     *
-     * @param settings  the node-level settings
-     * @param name      the name of the executor
-     * @param size      the fixed number of threads
-     * @param queueSize the size of the backing queue, -1 for unbounded
-     * @param prefix    the prefix for the settings keys
-     */
-    private FixedExecutorBuilder(
-            final Settings settings,
-            final String name,
-            final int size,
-            final int queueSize,
-            final String prefix,
-            final boolean deprecated) {
         super(name);
         final String sizeKey = settingsKey(prefix, "size");
-        final Setting.Property[] properties;
-        if (deprecated) {
-            properties = new Setting.Property[]{Setting.Property.NodeScope, Setting.Property.Deprecated};
-        } else {
-            properties = new Setting.Property[]{Setting.Property.NodeScope};
-        }
         this.sizeSetting =
             new Setting<>(
                 sizeKey,
                 s -> Integer.toString(size),
                 s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey),
-                properties);
+                Setting.Property.NodeScope);
         final String queueSizeKey = settingsKey(prefix, "queue_size");
-        this.queueSizeSetting = Setting.intSetting(queueSizeKey, queueSize, properties);
+        this.queueSizeSetting = Setting.intSetting(queueSizeKey, queueSize, Setting.Property.NodeScope);
     }

     @Override
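
Two constructors remain: the package-private one, which derives the settings prefix from the pool name, and the public one taking an explicit prefix. A construction sketch; the pool name and sizes here are illustrative assumptions, not values from this diff:

    // Registers thread_pool.my_pool.size and thread_pool.my_pool.queue_size settings.
    FixedExecutorBuilder builder =
        new FixedExecutorBuilder(settings, "my_pool", 4, 200, "thread_pool.my_pool");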
@@ -69,7 +69,6 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
         public static final String LISTENER = "listener";
         public static final String GET = "get";
         public static final String ANALYZE = "analyze";
-        public static final String INDEX = "index";
         public static final String BULK = "bulk";
         public static final String SEARCH = "search";
         public static final String MANAGEMENT = "management";
@@ -126,7 +125,6 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
         map.put(Names.LISTENER, ThreadPoolType.FIXED);
         map.put(Names.GET, ThreadPoolType.FIXED);
         map.put(Names.ANALYZE, ThreadPoolType.FIXED);
-        map.put(Names.INDEX, ThreadPoolType.FIXED);
         map.put(Names.BULK, ThreadPoolType.FIXED);
         map.put(Names.SEARCH, ThreadPoolType.FIXED_AUTO_QUEUE_SIZE);
         map.put(Names.MANAGEMENT, ThreadPoolType.SCALING);
@@ -172,7 +170,6 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
         final int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors);
         final int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512);
         builders.put(Names.GENERIC, new ScalingExecutorBuilder(Names.GENERIC, 4, genericThreadPoolMax, TimeValue.timeValueSeconds(30)));
-        builders.put(Names.INDEX, new FixedExecutorBuilder(settings, Names.INDEX, availableProcessors, 200, true));
         builders.put(Names.BULK, new FixedExecutorBuilder(settings, Names.BULK, availableProcessors, 200)); // now that we reuse bulk for index/delete ops
         builders.put(Names.GET, new FixedExecutorBuilder(settings, Names.GET, availableProcessors, 1000));
         builders.put(Names.ANALYZE, new FixedExecutorBuilder(settings, Names.ANALYZE, 1, 16));
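
Nodes that tuned the removed index pool should carry those values over to the bulk pool, which now serves index and delete operations as well; the RejectionActionIT change below makes exactly this substitution. A sketch:

    Settings nodeSettings = Settings.builder()
        .put("thread_pool.bulk.size", 8)          // was thread_pool.index.size
        .put("thread_pool.bulk.queue_size", 200)  // was thread_pool.index.queue_size
        .build();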
@@ -45,8 +45,8 @@ public class RejectionActionIT extends ESIntegTestCase {
             .put(super.nodeSettings(nodeOrdinal))
             .put("thread_pool.search.size", 1)
             .put("thread_pool.search.queue_size", 1)
-            .put("thread_pool.index.size", 1)
-            .put("thread_pool.index.queue_size", 1)
+            .put("thread_pool.bulk.size", 1)
+            .put("thread_pool.bulk.queue_size", 1)
             .put("thread_pool.get.size", 1)
             .put("thread_pool.get.queue_size", 1)
             .build();
@@ -67,7 +67,6 @@ public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTest
         request.timeout(randomTimeValue());
         request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
         request.setPreserveExisting(randomBoolean());
-        request.flatSettings(randomBoolean());
         return request;
     }

@@ -77,7 +76,6 @@ public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTest
         result.timeout(request.timeout());
         result.indicesOptions(request.indicesOptions());
         result.setPreserveExisting(request.isPreserveExisting());
-        result.flatSettings(request.flatSettings());
         return result;
     }

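
The flat_settings flag no longer round-trips through the request objects, since it only affects how settings are rendered in responses. A hedged sketch of requesting flattened keys as a raw query parameter via the low-level REST client instead (the endpoint is illustrative, and this assumes the 6.x performRequest(method, endpoint, params) signature):

    Map<String, String> params = Collections.singletonMap("flat_settings", "true");
    Response response = restClient.performRequest("GET", "/my-index/_settings", params);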
@@ -124,7 +124,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {
             super(Settings.EMPTY, IndexAction.NAME, TransportBulkActionIngestTests.this.transportService,
                     TransportBulkActionIngestTests.this.clusterService,
                     null, null, null, new ActionFilters(Collections.emptySet()), null,
-                    IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX, bulkAction, null);
+                    IndexRequest::new, IndexRequest::new, ThreadPool.Names.BULK, bulkAction, null);
         }

         @Override
@@ -433,7 +433,7 @@ public class IndexCreationTaskTests extends ESTestCase {

         when(docMapper.routingFieldMapper()).thenReturn(routingMapper);

-        when(mapper.docMappers(anyBoolean())).thenReturn(Collections.singletonList(docMapper));
+        when(mapper.documentMapper()).thenReturn(docMapper);

         final Index index = new Index("target", "tgt1234");
         final Supplier<Sort> supplier = mock(Supplier.class);
@@ -250,7 +250,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
         client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
         IndexShard shard = indexService.getShard(0);
         assertBusy(() -> {
-            assertFalse(shard.getTranslog().syncNeeded());
+            assertFalse(shard.isSyncNeeded());
         });
     }

@@ -275,7 +275,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
         client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
         assertNotNull(indexService.getFsyncTask());
         final IndexShard shard = indexService.getShard(0);
-        assertBusy(() -> assertFalse(shard.getTranslog().syncNeeded()));
+        assertBusy(() -> assertFalse(shard.isSyncNeeded()));

         client()
             .admin()
@@ -311,7 +311,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
         indexService.updateMetaData(metaData);

         IndexShard shard = indexService.getShard(0);
-        assertBusy(() -> assertThat(shard.getTranslog().totalOperations(), equalTo(0)));
+        assertBusy(() -> assertThat(shard.estimateTranslogOperationsFromMinSeq(0L), equalTo(0)));
     }

     public void testIllegalFsyncInterval() {
@@ -52,7 +52,6 @@ public class BinaryDVFieldDataTests extends AbstractFieldDataTestCase {

         final DocumentMapper mapper = mapperService.documentMapperParser().parse("test", new CompressedXContent(mapping));
-

         List<BytesRef> bytesList1 = new ArrayList<>(2);
         bytesList1.add(randomBytes());
         bytesList1.add(randomBytes());
@@ -123,22 +122,26 @@ public class BinaryDVFieldDataTests extends AbstractFieldDataTestCase {
         // Test whether ScriptDocValues.BytesRefs makes a deepcopy
         fieldData = indexFieldData.load(reader);
         ScriptDocValues<?> scriptValues = fieldData.getScriptValues();
-        scriptValues.setNextDocId(0);
-        assertEquals(2, scriptValues.size());
-        assertEquals(bytesList1.get(0), scriptValues.get(0));
-        assertEquals(bytesList1.get(1), scriptValues.get(1));
+        Object[][] retValues = new BytesRef[4][0];
+        for (int i = 0; i < 4; i++) {
+            scriptValues.setNextDocId(i);
+            retValues[i] = new BytesRef[scriptValues.size()];
+            for (int j = 0; j < retValues[i].length; j++) {
+                retValues[i][j] = scriptValues.get(j);
+            }
+        }
+        assertEquals(2, retValues[0].length);
+        assertEquals(bytesList1.get(0), retValues[0][0]);
+        assertEquals(bytesList1.get(1), retValues[0][1]);

-        scriptValues.setNextDocId(1);
-        assertEquals(1, scriptValues.size());
-        assertEquals(bytes1, scriptValues.get(0));
+        assertEquals(1, retValues[1].length);
+        assertEquals(bytes1, retValues[1][0]);

-        scriptValues.setNextDocId(2);
-        assertEquals(0, scriptValues.size());
+        assertEquals(0, retValues[2].length);

-        scriptValues.setNextDocId(3);
-        assertEquals(2, scriptValues.size());
-        assertEquals(bytesList2.get(0), scriptValues.get(0));
-        assertEquals(bytesList2.get(1), scriptValues.get(1));
+        assertEquals(2, retValues[3].length);
+        assertEquals(bytesList2.get(0), retValues[3][0]);
+        assertEquals(bytesList2.get(1), retValues[3][1]);
     }

     private static BytesRef randomBytes() {
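
The reworked test reads every document first and asserts afterwards: if ScriptDocValues.BytesRefs handed out shared or recycled BytesRef instances rather than deep copies, values captured for earlier documents would be clobbered by later setNextDocId calls. The core of the pattern it guards (identifiers from the test above):

    scriptValues.setNextDocId(0);
    BytesRef captured = (BytesRef) scriptValues.get(0); // must be a deep copy
    scriptValues.setNextDocId(1); // must not mutate 'captured'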
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.index.mapper;

+import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.Priority;
@@ -41,10 +42,12 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDI
 import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE;
+import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.NULL_VALUE;
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.notNullValue;

 public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
@@ -349,4 +352,50 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
         );
         assertThat(e.getMessage(), containsString("name cannot be empty string"));
     }

+    public void testNullValue() throws Exception {
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties").startObject("location")
+            .field("type", "geo_point")
+            .field(NULL_VALUE, "1,2")
+            .endObject().endObject()
+            .endObject().endObject());
+
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type", new CompressedXContent(mapping));
+        FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+        assertThat(fieldMapper, instanceOf(GeoPointFieldMapper.class));
+
+        Object nullValue = fieldMapper.fieldType().nullValue();
+        assertThat(nullValue, equalTo(new GeoPoint(1, 2)));
+
+        ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference
+                .bytes(XContentFactory.jsonBuilder()
+                    .startObject()
+                    .nullField("location")
+                    .endObject()),
+            XContentType.JSON));
+
+        assertThat(doc.rootDoc().getField("location"), notNullValue());
+        BytesRef defaultValue = doc.rootDoc().getField("location").binaryValue();
+
+        doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference
+                .bytes(XContentFactory.jsonBuilder()
+                    .startObject()
+                    .field("location", "1, 2")
+                    .endObject()),
+            XContentType.JSON));
+        // Shouldn't matter if we specify the value explicitly or use null value
+        assertThat(defaultValue, equalTo(doc.rootDoc().getField("location").binaryValue()));
+
+        doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference
+                .bytes(XContentFactory.jsonBuilder()
+                    .startObject()
+                    .field("location", "3, 4")
+                    .endObject()),
+            XContentType.JSON));
+        // Shouldn't matter if we specify the value explicitly or use null value
+        assertThat(defaultValue, not(equalTo(doc.rootDoc().getField("location").binaryValue())));
+    }
+
 }
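
For reference, the mapping that testNullValue() builds corresponds to the following JSON; null_value substitutes the configured point whenever a document supplies an explicit null for the field. A sketch of the builder output (formatting assumed):

    String mappingJson =
        "{\"type\": {\"properties\": {\"location\": "
            + "{\"type\": \"geo_point\", \"null_value\": \"1,2\"}}}}";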
@@ -119,7 +119,7 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
             } else {
                 throw e;
             }
-        assertFalse(indexService.mapperService().hasMapping(MapperService.DEFAULT_MAPPING));
+        assertNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING));
     }

     public void testTotalFieldsExceedsLimit() throws Throwable {
@@ -33,7 +33,7 @@ import static org.hamcrest.Matchers.equalTo;
 public class NullValueTests extends ESSingleNodeTestCase {
     public void testNullNullValue() throws Exception {
         IndexService indexService = createIndex("test", Settings.builder().build());
-        String[] typesToTest = {"integer", "long", "double", "float", "short", "date", "ip", "keyword", "boolean", "byte"};
+        String[] typesToTest = {"integer", "long", "double", "float", "short", "date", "ip", "keyword", "boolean", "byte", "geo_point"};

         for (String type : typesToTest) {
             String mapping = Strings.toString(XContentFactory.jsonBuilder()
@@ -543,7 +543,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
                         listener.onFailure(e);
                     }
                 },
-                ThreadPool.Names.INDEX, request);
+                ThreadPool.Names.BULK, request);
         }

         @Override
@@ -682,7 +682,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
         @Override
         protected PrimaryResult performOnPrimary(
                 final IndexShard primary, final GlobalCheckpointSyncAction.Request request) throws Exception {
-            primary.getTranslog().sync();
+            primary.sync();
             return new PrimaryResult(request, new ReplicationResponse());
         }

@@ -330,7 +330,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase

         final Translog.Operation op1;
         final List<Translog.Operation> initOperations = new ArrayList<>(initDocs);
-        try (Translog.Snapshot snapshot = replica2.getTranslog().newSnapshot()) {
+        try (Translog.Snapshot snapshot = getTranslog(replica2).newSnapshot()) {
             assertThat(snapshot.totalOperations(), equalTo(initDocs + 1));
             for (int i = 0; i < initDocs; i++) {
                 Translog.Operation op = snapshot.next();
@@ -347,7 +347,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
         shards.promoteReplicaToPrimary(replica1).get(); // wait until resync completed.
         shards.index(new IndexRequest(index.getName(), "type", "d2").source("{}", XContentType.JSON));
         final Translog.Operation op2;
-        try (Translog.Snapshot snapshot = replica2.getTranslog().newSnapshot()) {
+        try (Translog.Snapshot snapshot = getTranslog(replica2).newSnapshot()) {
             assertThat(snapshot.totalOperations(), equalTo(initDocs + 2));
             op2 = snapshot.next();
             assertThat(op2.seqNo(), equalTo(op1.seqNo()));
@@ -362,7 +362,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
         shards.promoteReplicaToPrimary(replica2);
         logger.info("--> Recover replica3 from replica2");
         recoverReplica(replica3, replica2);
-        try (Translog.Snapshot snapshot = replica3.getTranslog().newSnapshot()) {
+        try (Translog.Snapshot snapshot = getTranslog(replica3).newSnapshot()) {
             assertThat(snapshot.totalOperations(), equalTo(initDocs + 1));
             assertThat(snapshot.next(), equalTo(op2));
             assertThat("Remaining of snapshot should contain init operations", snapshot, containsOperationsInAnyOrder(initOperations));
@@ -468,7 +468,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
                                              long expectedPrimaryTerm,
                                              String failureMessage) throws IOException {
         for (IndexShard indexShard : replicationGroup) {
-            try(Translog.Snapshot snapshot = indexShard.getTranslog().newSnapshot()) {
+            try(Translog.Snapshot snapshot = getTranslog(indexShard).newSnapshot()) {
                 assertThat(snapshot.totalOperations(), equalTo(expectedOperation));
                 long expectedSeqNo = 0L;
                 Translog.Operation op = snapshot.next();
@@ -129,7 +129,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
                 shards.flush();
                 translogTrimmed = randomBoolean();
                 if (translogTrimmed) {
-                    final Translog translog = shards.getPrimary().getTranslog();
+                    final Translog translog = getTranslog(shards.getPrimary());
                     translog.getDeletionPolicy().setRetentionAgeInMillis(0);
                     translog.trimUnreferencedReaders();
                 }
@@ -272,7 +272,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
             // otherwise the deletion policy won't trim translog
             assertBusy(() -> {
                 shards.syncGlobalCheckpoint();
-                assertThat(newPrimary.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo()));
+                assertThat(newPrimary.getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo()));
             });
             newPrimary.flush(new FlushRequest());
             uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10));
@@ -341,7 +341,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
             // Index more docs - move the global checkpoint >= seqno of the stale operations.
            goodDocs += shards.indexDocs(scaledRandomIntBetween(staleDocs, staleDocs * 5));
             shards.syncGlobalCheckpoint();
-            assertThat(replica.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(replica.seqNoStats().getMaxSeqNo()));
+            assertThat(replica.getLastSyncedGlobalCheckpoint(), equalTo(replica.seqNoStats().getMaxSeqNo()));
             // Recover a replica again should also rollback the stale documents.
             shards.removeReplica(replica);
             replica.close("recover replica - second time", false);
|
|
|
@@ -76,14 +76,26 @@ public class GeoPointParsingTests extends ESTestCase {
         GeoPoint point = GeoUtils.parseGeoPoint(objectLatLon(randomPt.lat(), randomPt.lon()));
         assertPointsEqual(point, randomPt);

+        GeoUtils.parseGeoPoint(toObject(objectLatLon(randomPt.lat(), randomPt.lon())), randomBoolean());
+        assertPointsEqual(point, randomPt);
+
         GeoUtils.parseGeoPoint(arrayLatLon(randomPt.lat(), randomPt.lon()), point);
         assertPointsEqual(point, randomPt);

+        GeoUtils.parseGeoPoint(toObject(arrayLatLon(randomPt.lat(), randomPt.lon())), randomBoolean());
+        assertPointsEqual(point, randomPt);
+
         GeoUtils.parseGeoPoint(geohash(randomPt.lat(), randomPt.lon()), point);
         assertCloseTo(point, randomPt.lat(), randomPt.lon());

+        GeoUtils.parseGeoPoint(toObject(geohash(randomPt.lat(), randomPt.lon())), randomBoolean());
+        assertCloseTo(point, randomPt.lat(), randomPt.lon());
+
         GeoUtils.parseGeoPoint(stringLatLon(randomPt.lat(), randomPt.lon()), point);
         assertCloseTo(point, randomPt.lat(), randomPt.lon());
+
+        GeoUtils.parseGeoPoint(toObject(stringLatLon(randomPt.lat(), randomPt.lon())), randomBoolean());
+        assertCloseTo(point, randomPt.lat(), randomPt.lon());
     }

     // Based on #5390
@@ -99,6 +111,12 @@ public class GeoPointParsingTests extends ESTestCase {
         parser.nextToken();
         Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
         assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
+
+        XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
+        parser2.nextToken();
+        e = expectThrows(ElasticsearchParseException.class, () ->
+            GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean()));
+        assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
     }

     public void testInvalidPointLatHashMix() throws IOException {
@@ -109,9 +127,14 @@ public class GeoPointParsingTests extends ESTestCase {

         XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
         parser.nextToken();

         Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
         assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));

+        XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
+        parser2.nextToken();
+        e = expectThrows(ElasticsearchParseException.class, () ->
+            GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean()));
+        assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));
     }

     public void testInvalidPointLonHashMix() throws IOException {
@@ -125,6 +148,12 @@ public class GeoPointParsingTests extends ESTestCase {

         Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
         assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));
+
+        XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
+        parser2.nextToken();
+        e = expectThrows(ElasticsearchParseException.class, () ->
+            GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean()));
+        assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));
     }

     public void testInvalidField() throws IOException {
@@ -135,9 +164,15 @@ public class GeoPointParsingTests extends ESTestCase {

         XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
         parser.nextToken();

         Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
         assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));

+
+        XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
+        parser2.nextToken();
+        e = expectThrows(ElasticsearchParseException.class, () ->
+            GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean()));
+        assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
     }

     private XContentParser objectLatLon(double lat, double lon) throws IOException {
@@ -183,4 +218,22 @@ public class GeoPointParsingTests extends ESTestCase {
         assertEquals(point.lat(), lat, TOLERANCE);
         assertEquals(point.lon(), lon, TOLERANCE);
     }
+
+    public static Object toObject(XContentParser parser) throws IOException {
+        XContentParser.Token token = parser.currentToken();
+        if (token == XContentParser.Token.VALUE_NULL) {
+            return null;
+        } else if (token == XContentParser.Token.VALUE_STRING) {
+            return parser.text();
+        } else if (token == XContentParser.Token.VALUE_NUMBER) {
+            return parser.numberValue();
+        } else if (token == XContentParser.Token.START_OBJECT) {
+            return parser.map();
+        } else if (token == XContentParser.Token.START_ARRAY) {
+            return parser.list();
+        } else {
+            fail("Unexpected token " + token);
+        }
+        return null;
+    }
 }
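Note: the new toObject helper re-materializes whatever the parser is positioned on (null, string, number, object, or array) so that every fixture can also be fed to the Object-accepting parseGeoPoint overload. A usage sketch built only from the helpers visible above; the boolean flag is randomized by the tests and simply forwarded:

    XContentParser parser = objectLatLon(40.7143528, -74.0059731);
    GeoPoint point = GeoUtils.parseGeoPoint(toObject(parser), randomBoolean());
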
@@ -90,9 +90,6 @@ public class GlobalCheckpointSyncActionTests extends ESTestCase {
         final Translog.Durability durability = randomFrom(Translog.Durability.ASYNC, Translog.Durability.REQUEST);
         when(indexShard.getTranslogDurability()).thenReturn(durability);

-        final Translog translog = mock(Translog.class);
-        when(indexShard.getTranslog()).thenReturn(translog);
-
         final long globalCheckpoint = randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Integer.MAX_VALUE);
         final long lastSyncedGlobalCheckpoint;
         if (randomBoolean() && globalCheckpoint != SequenceNumbers.NO_OPS_PERFORMED) {
@@ -104,7 +101,7 @@ public class GlobalCheckpointSyncActionTests extends ESTestCase {
         }

         when(indexShard.getGlobalCheckpoint()).thenReturn(globalCheckpoint);
-        when(translog.getLastSyncedGlobalCheckpoint()).thenReturn(lastSyncedGlobalCheckpoint);
+        when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(lastSyncedGlobalCheckpoint);

         final GlobalCheckpointSyncAction action = new GlobalCheckpointSyncAction(
             Settings.EMPTY,
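Note: with translog access funneled through the engine, this unit test no longer wires a mock Translog at all; it stubs the shard directly. Condensed from the hunk above:

    IndexShard indexShard = mock(IndexShard.class);
    when(indexShard.getGlobalCheckpoint()).thenReturn(globalCheckpoint);
    when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(lastSyncedGlobalCheckpoint);
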
@@ -107,6 +107,7 @@ import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
+import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog;

 public class IndexShardIT extends ESSingleNodeTestCase {

@@ -167,7 +168,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
         IndexService test = indicesService.indexService(resolveIndex("test"));
         IndexShard shard = test.getShardOrNull(0);
-        Translog translog = ShardUtilsTests.getShardEngine(shard).getTranslog();
+        Translog translog = getTranslog(shard);
         Predicate<Translog> needsSync = (tlog) -> {
             // we can't use tlog.needsSync() here since it also takes the global checkpoint into account
             // we explicitly want to check here if our durability checks are taken into account so we only
@@ -343,7 +344,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
             SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON),
             IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {});
         assertTrue(shard.shouldPeriodicallyFlush());
-        final Translog translog = shard.getEngine().getTranslog();
+        final Translog translog = getTranslog(shard);
         assertEquals(2, translog.stats().getUncommittedOperations());
         client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON)
             .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
@@ -384,7 +385,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
         final IndexService test = indicesService.indexService(resolveIndex("test"));
         final IndexShard shard = test.getShardOrNull(0);
         int rolls = 0;
-        final Translog translog = shard.getEngine().getTranslog();
+        final Translog translog = getTranslog(shard);
         final long generation = translog.currentFileGeneration();
         final int numberOfDocuments = randomIntBetween(32, 128);
         for (int i = 0; i < numberOfDocuments; i++) {
@@ -454,11 +455,11 @@ public class IndexShardIT extends ESSingleNodeTestCase {
                 assertThat(shard.flushStats().getPeriodic(), equalTo(periodic + 1));
             };
         } else {
-            final long generation = shard.getEngine().getTranslog().currentFileGeneration();
+            final long generation = getTranslog(shard).currentFileGeneration();
             client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
             check = () -> assertEquals(
                 generation + 1,
-                shard.getEngine().getTranslog().currentFileGeneration());
+                getTranslog(shard).currentFileGeneration());
         }
         assertBusy(check);
         running.set(false);
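Note: every shard.getEngine().getTranslog() chain in this test collapses to the statically imported test helper, which is now the sanctioned way for tests to reach the translog:

    import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog;

    Translog translog = getTranslog(shard); // replaces shard.getEngine().getTranslog()
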
@@ -72,7 +72,6 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.MergePolicyConfig;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineException;
@@ -286,14 +285,14 @@ public class IndexShardTests extends IndexShardTestCase {
         closeShards(indexShard);
         assertThat(indexShard.getActiveOperationsCount(), equalTo(0));
         try {
-            indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX, "");
+            indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.BULK, "");
             fail("we should not be able to increment anymore");
         } catch (IndexShardClosedException e) {
             // expected
         }
         try {
             indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, null,
-                ThreadPool.Names.INDEX, "");
+                ThreadPool.Names.BULK, "");
             fail("we should not be able to increment anymore");
         } catch (IndexShardClosedException e) {
             // expected
@@ -304,7 +303,7 @@ public class IndexShardTests extends IndexShardTestCase {
         IndexShard indexShard = newShard(false);
         expectThrows(IndexShardNotStartedException.class, () ->
             indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm() + randomIntBetween(1, 100),
-                SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.INDEX, ""));
+                SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.BULK, ""));
         closeShards(indexShard);
     }

@@ -344,7 +343,7 @@ public class IndexShardTests extends IndexShardTestCase {
                         throw new RuntimeException(e);
                     }
                 },
-                ThreadPool.Names.INDEX, id);
+                ThreadPool.Names.BULK, id);
             });
             thread.start();
             threads.add(thread);
@@ -395,7 +394,7 @@ public class IndexShardTests extends IndexShardTestCase {
                         throw new RuntimeException(e);
                     }
                 },
-                ThreadPool.Names.INDEX, id);
+                ThreadPool.Names.BULK, id);
             });
             thread.start();
             delayedThreads.add(thread);
@@ -519,7 +518,7 @@ public class IndexShardTests extends IndexShardTestCase {
     public void testPrimaryPromotionRollsGeneration() throws Exception {
         final IndexShard indexShard = newStartedShard(false);

-        final long currentTranslogGeneration = indexShard.getTranslog().getGeneration().translogFileGeneration;
+        final long currentTranslogGeneration = getTranslog(indexShard).getGeneration().translogFileGeneration;

         // promote the replica
         final ShardRouting replicaRouting = indexShard.routingEntry();
@@ -557,8 +556,8 @@ public class IndexShardTests extends IndexShardTestCase {
             ThreadPool.Names.GENERIC, "");

         latch.await();
-        assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1));
-        assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
+        assertThat(getTranslog(indexShard).getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1));
+        assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));

         closeShards(indexShard);
     }
@@ -579,7 +578,7 @@ public class IndexShardTests extends IndexShardTestCase {
             true, ShardRoutingState.STARTED, replicaRouting.allocationId());
         final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 1000);
         indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {
-                assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
+                assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
             }, 0L,
             Collections.singleton(indexShard.routingEntry().allocationId().getId()),
             new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(),
@@ -591,7 +590,7 @@ public class IndexShardTests extends IndexShardTestCase {
         assertEquals(0, indexShard.getActiveOperationsCount());
         if (indexShard.routingEntry().isRelocationTarget() == false) {
             try {
-                indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.INDEX, "");
+                indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.BULK, "");
                 fail("shard shouldn't accept operations as replica");
             } catch (IllegalStateException ignored) {

@@ -610,14 +609,14 @@ public class IndexShardTests extends IndexShardTestCase {

     private Releasable acquirePrimaryOperationPermitBlockingly(IndexShard indexShard) throws ExecutionException, InterruptedException {
         PlainActionFuture<Releasable> fut = new PlainActionFuture<>();
-        indexShard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX, "");
+        indexShard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.BULK, "");
         return fut.get();
     }

     private Releasable acquireReplicaOperationPermitBlockingly(IndexShard indexShard, long opPrimaryTerm)
         throws ExecutionException, InterruptedException {
         PlainActionFuture<Releasable> fut = new PlainActionFuture<>();
-        indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), fut, ThreadPool.Names.INDEX, "");
+        indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), fut, ThreadPool.Names.BULK, "");
         return fut.get();
     }

@@ -665,12 +664,12 @@ public class IndexShardTests extends IndexShardTestCase {
         if (shardRouting.primary() == false) {
             final IllegalStateException e =
                 expectThrows(IllegalStateException.class,
-                    () -> indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX, ""));
+                    () -> indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.BULK, ""));
             assertThat(e, hasToString(containsString("shard " + shardRouting + " is not a primary")));
         }

         final long primaryTerm = indexShard.getPrimaryTerm();
-        final long translogGen = engineClosed ? -1 : indexShard.getTranslog().getGeneration().translogFileGeneration;
+        final long translogGen = engineClosed ? -1 : getTranslog(indexShard).getGeneration().translogFileGeneration;

         final Releasable operation1;
         final Releasable operation2;
@@ -702,7 +701,7 @@ public class IndexShardTests extends IndexShardTestCase {
         };

         indexShard.acquireReplicaOperationPermit(primaryTerm - 1, SequenceNumbers.UNASSIGNED_SEQ_NO, onLockAcquired,
-            ThreadPool.Names.INDEX, "");
+            ThreadPool.Names.BULK, "");

         assertFalse(onResponse.get());
         assertTrue(onFailure.get());
@@ -748,7 +747,7 @@ public class IndexShardTests extends IndexShardTestCase {
             @Override
             public void onResponse(Releasable releasable) {
                 assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
-                assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
+                assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
                 assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint));
                 assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint));
                 onResponse.set(true);
@@ -794,25 +793,25 @@ public class IndexShardTests extends IndexShardTestCase {
         assertFalse(onResponse.get());
         assertNull(onFailure.get());
         assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
-        assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm));
+        assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(primaryTerm));
         Releasables.close(operation1);
         // our operation should still be blocked
         assertFalse(onResponse.get());
         assertNull(onFailure.get());
         assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
-        assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm));
+        assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(primaryTerm));
         Releasables.close(operation2);
         barrier.await();
         // now lock acquisition should have succeeded
         assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
-        assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
+        assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
         if (engineClosed) {
             assertFalse(onResponse.get());
             assertThat(onFailure.get(), instanceOf(AlreadyClosedException.class));
         } else {
             assertTrue(onResponse.get());
             assertNull(onFailure.get());
-            assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(translogGen + 1));
+            assertThat(getTranslog(indexShard).getGeneration().translogFileGeneration, equalTo(translogGen + 1));
             assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint));
             assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint));
         }
@@ -1023,7 +1022,7 @@ public class IndexShardTests extends IndexShardTestCase {
                     latch.countDown();
                 }
             },
-            ThreadPool.Names.INDEX, "");
+            ThreadPool.Names.BULK, "");
         };

         final long firstIncrement = 1 + (randomBoolean() ? 0 : 1);
@@ -1384,7 +1383,7 @@ public class IndexShardTests extends IndexShardTestCase {
                     super.onResponse(releasable);
                 }
             };
-            shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.INDEX, "i_" + i);
+            shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.BULK, "i_" + i);
             onLockAcquiredActions.add(onLockAcquired);
         }

@@ -1649,7 +1648,7 @@ public class IndexShardTests extends IndexShardTestCase {
         assertEquals(1, newShard.recoveryState().getTranslog().totalOperations());
         assertEquals(1, newShard.recoveryState().getTranslog().totalOperationsOnStart());
         assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f);
-        try (Translog.Snapshot snapshot = newShard.getTranslog().newSnapshot()) {
+        try (Translog.Snapshot snapshot = getTranslog(newShard).newSnapshot()) {
             Translog.Operation operation;
             int numNoops = 0;
             while ((operation = snapshot.next()) != null) {
@@ -2050,7 +2049,7 @@ public class IndexShardTests extends IndexShardTestCase {
             @Override
             public long indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) throws IOException {
                 final long localCheckpoint = super.indexTranslogOperations(operations, totalTranslogOps);
-                assertFalse(replica.getTranslog().syncNeeded());
+                assertFalse(replica.isSyncNeeded());
                 return localCheckpoint;
             }
         }, true);
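Note: all the permit acquisitions in this file switch from the removed INDEX thread pool to the BULK pool; the change is mechanical, for example:

    // before: indexShard.acquirePrimaryOperationPermit(onPermitAcquired, ThreadPool.Names.INDEX, debugInfo);
    indexShard.acquirePrimaryOperationPermit(onPermitAcquired, ThreadPool.Names.BULK, debugInfo);
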
@@ -133,7 +133,7 @@ public class RefreshListenersTests extends ESTestCase {
             (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm);
         engine = new InternalEngine(config);
         engine.recoverFromTranslog();
-        listeners.setTranslog(engine.getTranslog());
+        listeners.setCurrentRefreshLocationSupplier(engine::getTranslogLastWriteLocation);
     }

     @After
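Note: RefreshListeners no longer holds the Translog itself; it pulls the last write location through a supplier, which keeps the listener decoupled from engine internals. The wiring, repeated from the hunk above:

    listeners.setCurrentRefreshLocationSupplier(engine::getTranslogLastWriteLocation);
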
@@ -148,7 +148,7 @@ public class ClusterStateChanges extends AbstractComponent {
         when(indexService.index()).thenReturn(indexMetaData.getIndex());
         MapperService mapperService = mock(MapperService.class);
         when(indexService.mapperService()).thenReturn(mapperService);
-        when(mapperService.docMappers(anyBoolean())).thenReturn(Collections.emptyList());
+        when(mapperService.documentMapper()).thenReturn(null);
         when(indexService.getIndexEventListener()).thenReturn(new IndexEventListener() {});
         when(indexService.getIndexSortSupplier()).thenReturn(() -> null);
         return indexService;
@@ -113,7 +113,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
         SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
         final ShardId shardId = shard.shardId();
         PlainActionFuture<Releasable> fut = new PlainActionFuture<>();
-        shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX, "");
+        shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.BULK, "");
         try (Releasable operationLock = fut.get()) {
             SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
             flushService.attemptSyncedFlush(shardId, listener);
@@ -59,7 +59,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
         }
         flushShard(replica);
         replica.updateGlobalCheckpointOnReplica(initDocs - 1, "test");
-        replica.getTranslog().sync();
+        replica.sync();
         final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null);
         assertThat(PeerRecoveryTargetService.getStartingSeqNo(logger, recoveryTarget), equalTo(initDocs));
         recoveryTarget.decRef();
@@ -81,7 +81,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
         // Advances the global checkpoint, a safe commit also advances
         {
             replica.updateGlobalCheckpointOnReplica(initDocs + moreDocs - 1, "test");
-            replica.getTranslog().sync();
+            replica.sync();
             final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null);
             assertThat(PeerRecoveryTargetService.getStartingSeqNo(logger, recoveryTarget), equalTo(initDocs + moreDocs));
             recoveryTarget.decRef();
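Note: replica.sync() presumably delegates to the engine-owned translog; a sketch of the assumed shard-level method (the real body may differ):

    public void sync() throws IOException {
        getEngine().getTranslog().sync();
    }
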
@@ -61,7 +61,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
         try (ReplicationGroup shards = createGroup(0)) {
             shards.startPrimary();
             int docs = shards.indexDocs(10);
-            shards.getPrimary().getTranslog().rollGeneration();
+            getTranslog(shards.getPrimary()).rollGeneration();
             shards.flush();
             if (randomBoolean()) {
                 docs += shards.indexDocs(10);
@@ -69,7 +69,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             shards.addReplica();
             shards.startAll();
             final IndexShard replica = shards.getReplicas().get(0);
-            assertThat(replica.getTranslog().totalOperations(), equalTo(docs));
+            assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(docs));
         }
     }

@@ -77,7 +77,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
         try (ReplicationGroup shards = createGroup(0)) {
             shards.startPrimary();
             shards.indexDocs(10);
-            shards.getPrimary().getTranslog().rollGeneration();
+            getTranslog(shards.getPrimary()).rollGeneration();
             shards.flush();
             shards.indexDocs(10);
             final IndexShard replica = shards.addReplica();
@@ -99,7 +99,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             releaseRecovery.countDown();
             future.get();
             // rolling/flushing is async
-            assertBusy(() -> assertThat(replica.getTranslog().totalOperations(), equalTo(0)));
+            assertBusy(() -> assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(0)));
         }
     }

@@ -123,7 +123,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {

         // delete #1
         orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id", VersionType.EXTERNAL, u -> {});
-        orgReplica.getTranslog().rollGeneration(); // isolate the delete in it's own generation
+        getTranslog(orgReplica).rollGeneration(); // isolate the delete in it's own generation
         // index #0
         orgReplica.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
             SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON), u -> {});
@@ -167,7 +167,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             shards.recoverReplica(newReplica);
             shards.assertAllEqual(3);

-            assertThat(newReplica.getTranslog().totalOperations(), equalTo(translogOps));
+            assertThat(newReplica.estimateTranslogOperationsFromMinSeq(0), equalTo(translogOps));
         }
     }

@@ -184,7 +184,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {

             IndexShard replica = shards.getReplicas().get(0);
             final String historyUUID = replica.getHistoryUUID();
-            Translog.TranslogGeneration translogGeneration = replica.getTranslog().getGeneration();
+            Translog.TranslogGeneration translogGeneration = getTranslog(replica).getGeneration();
             shards.removeReplica(replica);
             replica.close("test", false);
             IndexWriterConfig iwc = new IndexWriterConfig(null)
@@ -219,7 +219,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             shards.recoverReplica(newReplica);
             // file based recovery should be made
             assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty()));
-            assertThat(newReplica.getTranslog().totalOperations(), equalTo(numDocs));
+            assertThat(newReplica.estimateTranslogOperationsFromMinSeq(0), equalTo(numDocs));

             // history uuid was restored
             assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID));
@@ -238,7 +238,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             }
             final IndexShard replica = shards.addReplica();
             shards.recoverReplica(replica);
-            assertThat(replica.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(numDocs - 1));
+            assertThat(replica.getLastSyncedGlobalCheckpoint(), equalTo(numDocs - 1));
         }
     }

@@ -291,7 +291,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             final IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId());
             shards.recoverReplica(newReplica);

-            try (Translog.Snapshot snapshot = newReplica.getTranslog().newSnapshot()) {
+            try (Translog.Snapshot snapshot = getTranslog(newReplica).newSnapshot()) {
                 assertThat("Sequence based recovery should keep existing translog", snapshot, SnapshotMatchers.size(initDocs + moreDocs));
             }
             assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedDocs + moreDocs));
@@ -321,7 +321,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             shards.recoverReplica(replica);
             // Make sure the flushing will eventually be completed (eg. `shouldPeriodicallyFlush` is false)
             assertBusy(() -> assertThat(getEngine(replica).shouldPeriodicallyFlush(), equalTo(false)));
-            assertThat(replica.getTranslog().totalOperations(), equalTo(numDocs));
+            assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(numDocs));
             shards.assertAllEqual(numDocs);
         }
     }
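Note: the totalOperations() reads are replaced by a sequence-number-based estimate; asking from seq# 0 covers everything and so stands in for the old total. A sketch of the assumed delegate on IndexShard (not copied from the commit):

    public int estimateTranslogOperationsFromMinSeq(long minSeqNo) {
        return getEngine().getTranslog().estimateTotalOperationsFromMinSeq(minSeqNo);
    }
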
@@ -68,8 +68,8 @@ import static org.hamcrest.Matchers.is;
 import static org.hamcrest.core.IsNull.notNullValue;
 import static org.hamcrest.core.IsNull.nullValue;

-@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456")
 @ESIntegTestCase.SuiteScopeTestCase
+@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456")
 public class MovAvgIT extends ESIntegTestCase {
     private static final String INTERVAL_FIELD = "l_value";
     private static final String VALUE_FIELD = "v_value";
@@ -1296,7 +1296,7 @@ public class MovAvgIT extends ESIntegTestCase {
                 } else {
                     assertThat("[_count] movavg is null", countMovAvg, notNullValue());
                     assertEquals("[_count] movavg does not match expected [" + countMovAvg.value() + " vs " + expectedCount + "]",
-                        countMovAvg.value(), expectedCount, 0.1);
+                        countMovAvg.value(), expectedCount, 0.1 * Math.abs(countMovAvg.value()));
                 }

                 // This is a gap bucket
@@ -1308,7 +1308,7 @@ public class MovAvgIT extends ESIntegTestCase {
                 } else {
                     assertThat("[value] movavg is null", valuesMovAvg, notNullValue());
                     assertEquals("[value] movavg does not match expected [" + valuesMovAvg.value() + " vs " + expectedValue + "]",
-                        valuesMovAvg.value(), expectedValue, 0.1);
+                        valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(countMovAvg.value()));
                 }
             }
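Note: the MovAvgIT fix replaces a fixed epsilon with a relative one; an absolute delta of 0.1 is far too strict once the averaged values grow large. The general shape:

    double delta = 0.1 * Math.abs(expected); // 10% relative tolerance
    assertEquals("movavg does not match expected", expected, actual, delta);
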
@@ -85,10 +85,6 @@ public class FixedThreadPoolTests extends ESThreadPoolTestCase {

             assertThat(counter, equalTo(rejections));
             assertThat(stats(threadPool, threadPoolName).getRejected(), equalTo(rejections));
-
-            if (threadPoolName.equals(ThreadPool.Names.INDEX)) {
-                assertSettingDeprecationsAndWarnings(new String[]{"thread_pool.index.queue_size", "thread_pool.index.size"});
-            }
         } finally {
             terminateThreadPoolIfNeeded(threadPool);
         }
@@ -60,8 +60,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {
         }
     }

-    public void testIndexingThreadPoolsMaxSize() throws InterruptedException {
-        final String name = randomFrom(Names.BULK, Names.INDEX);
+    public void testBulkThreadPoolsMaxSize() {
         final int maxSize = 1 + EsExecutors.numberOfProcessors(Settings.EMPTY);
         final int tooBig = randomIntBetween(1 + maxSize, Integer.MAX_VALUE);

@@ -74,7 +73,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {
         try {
             tp = new ThreadPool(Settings.builder()
                 .put("node.name", "testIndexingThreadPoolsMaxSize")
-                .put("thread_pool." + name + ".size", tooBig)
+                .put("thread_pool." + Names.BULK + ".size", tooBig)
                 .build());
         } finally {
             terminateThreadPoolIfNeeded(tp);
@@ -84,15 +83,11 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {
         assertThat(
             initial,
             hasToString(containsString(
-                "Failed to parse value [" + tooBig + "] for setting [thread_pool." + name + ".size] must be ")));
-
-        if (name.equals(Names.INDEX)) {
-            assertSettingDeprecationsAndWarnings(new String[] { "thread_pool.index.size" });
-        }
+                "Failed to parse value [" + tooBig + "] for setting [thread_pool." + Names.BULK + ".size] must be ")));
     }

     private static int getExpectedThreadPoolSize(Settings settings, String name, int size) {
-        if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {
+        if (name.equals(ThreadPool.Names.BULK)) {
             return Math.min(size, EsExecutors.numberOfProcessors(settings));
         } else {
             return size;
@@ -120,10 +115,6 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {
             assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize));
             // keep alive does not apply to fixed thread pools
             assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L));
-
-            if (threadPoolName.equals(Names.INDEX)) {
-                assertSettingDeprecationsAndWarnings(new String[] { "thread_pool.index.size" });
-            }
         } finally {
             terminateThreadPoolIfNeeded(threadPool);
         }
@@ -179,10 +170,6 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {
             latch.await(3, TimeUnit.SECONDS); // if this throws then ThreadPool#shutdownNow did not interrupt
             assertThat(oldExecutor.isShutdown(), equalTo(true));
             assertThat(oldExecutor.isTerminating() || oldExecutor.isTerminated(), equalTo(true));
-
-            if (threadPoolName.equals(Names.INDEX)) {
-                assertSettingDeprecationsAndWarnings(new String[] { "thread_pool.index.queue_size" });
-            }
         } finally {
             terminateThreadPoolIfNeeded(threadPool);
         }
@@ -645,4 +645,10 @@ public abstract class EngineTestCase extends ESTestCase {
         }
     }

+    /**
+     * Exposes a translog associated with the given engine for testing purpose.
+     */
+    public static Translog getTranslog(Engine engine) {
+        return engine.getTranslog();
+    }
 }
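Note: this static helper is the new test-only door to the translog now that production callers must go through the engine; typical use from another test class:

    Translog translog = EngineTestCase.getTranslog(engine);
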
@@ -55,6 +55,7 @@ import org.elasticsearch.index.cache.IndexCache;
 import org.elasticsearch.index.cache.query.DisabledQueryCache;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.engine.EngineTestCase;
 import org.elasticsearch.index.engine.InternalEngineFactory;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
@@ -67,6 +68,7 @@ import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
 import org.elasticsearch.index.store.DirectoryService;
 import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
@@ -644,6 +646,10 @@ public abstract class IndexShardTestCase extends ESTestCase {
         return indexShard.getEngine();
     }

+    public static Translog getTranslog(IndexShard shard) {
+        return EngineTestCase.getTranslog(getEngine(shard));
+    }
+
     public static ReplicationTracker getReplicationTracker(IndexShard indexShard) {
         return indexShard.getReplicationTracker();
     }
@@ -76,6 +76,7 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.CommitStats;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardTestCase;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
@@ -1158,7 +1159,7 @@ public final class InternalTestCluster extends TestCluster {
         for (IndexService indexService : indexServices) {
             for (IndexShard indexShard : indexService) {
                 try {
-                    indexShard.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs();
+                    IndexShardTestCase.getTranslog(indexShard).getDeletionPolicy().assertNoOpenTranslogRefs();
                 } catch (AlreadyClosedException ok) {
                     // all good
                 }
@@ -54,7 +54,7 @@ public class VersionUtils {

         Version last = versions.remove(versions.size() - 1);
         assert last.equals(current) : "The highest version must be the current one "
-            + "but was [" + versions.get(versions.size() - 1) + "] and current was [" + current + "]";
+            + "but was [" + last + "] and current was [" + current + "]";

         if (current.revision != 0) {
             /* If we are in a stable branch there should be no unreleased version constants
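Note: the old assertion message indexed versions.get(versions.size() - 1) after last had already been removed, so it printed the second-highest version rather than the one that tripped the assert. An illustration:

    List<Version> versions = new ArrayList<>(Arrays.asList(
        Version.fromString("5.4.0"), Version.fromString("5.4.1")));
    Version last = versions.remove(versions.size() - 1); // 5.4.1
    versions.get(versions.size() - 1);                   // now 5.4.0, stale for the message
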
@@ -28,9 +28,9 @@ import java.util.Collections;
 import java.util.LinkedHashSet;
 import java.util.List;

-import static java.util.Collections.singletonList;
 import static java.util.stream.Collectors.toCollection;
 import static java.util.stream.Collectors.toList;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -305,6 +305,24 @@ public class VersionUtilsTests extends ESTestCase {
             TestNewMinorBranchIn6x.V_6_2_0)));
     }

+    public static class TestIncorrectCurrentVersion {
+        public static final Version V_5_3_0 = Version.fromString("5.3.0");
+        public static final Version V_5_3_1 = Version.fromString("5.3.1");
+        public static final Version V_5_4_0 = Version.fromString("5.4.0");
+        public static final Version V_5_4_1 = Version.fromString("5.4.1");
+        public static final Version CURRENT = V_5_4_1;
+    }
+
+    public void testIncorrectCurrentVersion() {
+        Version previousVersion = TestIncorrectCurrentVersion.V_5_4_0;
+        AssertionError error = expectThrows(AssertionError.class, () ->
+            VersionUtils.resolveReleasedVersions(previousVersion, TestIncorrectCurrentVersion.class));
+
+        String message = error.getMessage();
+        assertThat(message, containsString(TestIncorrectCurrentVersion.CURRENT.toString()));
+        assertThat(message, containsString(previousVersion.toString()));
+    }
+
     /**
      * Tests that {@link Version#minimumCompatibilityVersion()} and {@link VersionUtils#allReleasedVersions()}
      * agree with the list of wire and index compatible versions we build in gradle.