Merge branch 'master' into ccr

* master:
  Remove the index thread pool (#29556)
  Remove extra copy in ScriptDocValues.Strings
  Fix full cluster restart test recovery (#29545)
  Fix binary doc values fetching in _search (#29567)
  Mutes failing MovAvgIT tests
  Fix the assertion message for an incorrect current version. (#29572)
  Fix the version ID for v5.6.10. (#29570)
  Painless Spec Documentation Clean Up (#29441)
  Add versions 5.6.10 and 6.2.5
  [TEST] test against scaled value instead of fixed epsilon in MovAvgIT
  Remove `flatSettings` support from request classes (#29560)
  MapperService to wrap a single DocumentMapper. (#29511)
  Fix dependency checks on libs when generating Eclipse configuration. (#29550)
  Add null_value support to geo_point type (#29451)
  Add documentation about the include_type_name option. (#29555)
  Enforce translog access via engine (#29542)
commit 4be1488324
@@ -572,7 +572,6 @@ public final class Request {

    static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException {
        Params parameters = Params.builder();
        parameters.withFlatSettings(clusterUpdateSettingsRequest.flatSettings());
        parameters.withTimeout(clusterUpdateSettingsRequest.timeout());
        parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout());
        HttpEntity entity = createEntity(clusterUpdateSettingsRequest, REQUEST_BODY_CONTENT_TYPE);
@@ -603,7 +602,6 @@ public final class Request {
        params.withLocal(request.local());
        params.withHuman(request.humanReadable());
        params.withIndicesOptions(request.indicesOptions());
        params.withFlatSettings(request.flatSettings());
        params.withIncludeDefaults(request.includeDefaults());
        return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null);
    }
@@ -613,7 +611,6 @@ public final class Request {
        parameters.withTimeout(updateSettingsRequest.timeout());
        parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout());
        parameters.withIndicesOptions(updateSettingsRequest.indicesOptions());
        parameters.withFlatSettings(updateSettingsRequest.flatSettings());
        parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting());

        String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices();
@@ -272,7 +272,6 @@ public class RequestTests extends ESTestCase {
        Map<String, String> expectedParams = new HashMap<>();
        setRandomIndicesOptions(getIndexRequest::indicesOptions, getIndexRequest::indicesOptions, expectedParams);
        setRandomLocal(getIndexRequest, expectedParams);
        setRandomFlatSettings(getIndexRequest::flatSettings, expectedParams);
        setRandomHumanReadable(getIndexRequest, expectedParams);
        setRandomIncludeDefaults(getIndexRequest, expectedParams);

@@ -1292,7 +1291,6 @@ public class RequestTests extends ESTestCase {
    public void testClusterPutSettings() throws IOException {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
        Map<String, String> expectedParams = new HashMap<>();
        setRandomFlatSettings(request::flatSettings, expectedParams);
        setRandomMasterTimeout(request, expectedParams);
        setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);

@@ -1344,7 +1342,6 @@ public class RequestTests extends ESTestCase {
        String[] indices = randomBoolean() ? null : randomIndicesNames(0, 2);
        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices);
        Map<String, String> expectedParams = new HashMap<>();
        setRandomFlatSettings(updateSettingsRequest::flatSettings, expectedParams);
        setRandomMasterTimeout(updateSettingsRequest, expectedParams);
        setRandomTimeout(updateSettingsRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
        setRandomIndicesOptions(updateSettingsRequest::indicesOptions, updateSettingsRequest::indicesOptions, expectedParams);
@@ -1627,16 +1624,6 @@ public class RequestTests extends ESTestCase {
        }
    }

    private static void setRandomFlatSettings(Consumer<Boolean> setter, Map<String, String> expectedParams) {
        if (randomBoolean()) {
            boolean flatSettings = randomBoolean();
            setter.accept(flatSettings);
            if (flatSettings) {
                expectedParams.put("flat_settings", String.valueOf(flatSettings));
            }
        }
    }

    private static void setRandomMasterTimeout(MasterNodeRequest<?> request, Map<String, String> expectedParams) {
        if (randomBoolean()) {
            String masterTimeout = randomTimeValue();
@@ -124,10 +124,6 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
        request.masterNodeTimeout("1m"); // <2>
        // end::put-settings-request-masterTimeout

        // tag::put-settings-request-flat-settings
        request.flatSettings(true); // <1>
        // end::put-settings-request-flat-settings

        // tag::put-settings-execute
        ClusterUpdateSettingsResponse response = client.cluster().putSettings(request);
        // end::put-settings-execute
@@ -58,7 +58,6 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -114,8 +113,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
        request.local(false); // <1>
        request.humanReadable(true); // <2>
        request.includeDefaults(false); // <3>
        request.flatSettings(false); // <4>
        request.indicesOptions(indicesOptions); // <5>
        request.indicesOptions(indicesOptions); // <4>
        // end::indices-exists-request-optionals

        // tag::indices-exists-response
@@ -1433,9 +1431,6 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
        // end::put-settings-settings-source
    }

        // tag::put-settings-request-flat-settings
        request.flatSettings(true); // <1>
        // end::put-settings-request-flat-settings
        // tag::put-settings-request-preserveExisting
        request.setPreserveExisting(false); // <1>
        // end::put-settings-request-preserveExisting
@@ -54,13 +54,6 @@ include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-setti
==== Optional Arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-flat-settings]
--------------------------------------------------
<1> Whether the updated settings returned in the `ClusterUpdateSettings` should
be in a flat format

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-timeout]
@@ -23,8 +23,7 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[indices-exists-req
<1> Whether to return local information or retrieve the state from master node
<2> Return result in a format suitable for humans
<3> Whether to return all default settings for each of the indices
<4> Return settings in flat format
<5> Controls how unavailable indices are resolved and how wildcard expressions are expanded
<4> Controls how unavailable indices are resolved and how wildcard expressions are expanded

[[java-rest-high-indices-sync]]
==== Synchronous Execution
@@ -55,13 +55,6 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-setti
==== Optional Arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-flat-settings]
--------------------------------------------------
<1> Whether the updated settings returned in the `UpdateSettings` should
be in a flat format

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-preserveExisting]
@@ -5,39 +5,6 @@ include::../Versions.asciidoc[]

include::painless-getting-started.asciidoc[]

// include::painless-examples.asciidoc[]

// include::painless-design.asciidoc[]

include::painless-lang-spec.asciidoc[]

include::painless-syntax.asciidoc[]

include::painless-api-reference.asciidoc[]

////
Proposed Outline (WIP)
Getting Started with Painless
Accessing Doc Values
Updating Fields
Working with Dates
Using Regular Expressions
Debugging Painless Scripts

Example Scripts
Using Painless in Script Fields
Using Painless in Watches
Using Painless in Function Score Queries
Using Painless in Script Queries
Using Painless When Updating Docs
Using Painless When Reindexing

How Painless Works
Painless Architecture
Dispatching Functions

Painless Language Specification
Painless API
////

Painless API Reference
@@ -1,17 +1,13 @@
["appendix",id="painless-api-reference"]
= Painless API Reference
[[painless-api-reference]]
== Painless API Reference

Painless has a strict whitelist for methods and
classes to make sure that all painless scripts are secure and fast. Most of
these methods are exposed directly from the JRE while others are part of
Elasticsearch or Painless itself. Below is a list of all available methods
grouped under the classes on which you can call them. Clicking on the method
name takes you to the documentation for the method.

NOTE: Methods defined in the JRE also have a `(java 9)` link which can be used
to see the method's documentation in Java 9 while clicking on the method's name
goes to the Java 8 documentation. Usually these aren't different but it is
worth going to the version that matches the version of Java you are using to
run Elasticsearch just in case.
Painless has a strict whitelist for methods and classes to ensure all
painless scripts are secure. Most of these methods are exposed directly
from the Java Runtime Environment (JRE) while others are part of
Elasticsearch or Painless itself. Below is a list of all available
classes grouped with their respective methods. Clicking on the method
name takes you to the documentation for that specific method. Methods
defined in the JRE also have a `(java 9)` link which can be used to see
the method's documentation in Java 9.

include::painless-api-reference/index.asciidoc[]
@@ -0,0 +1,172 @@
[[painless-casting]]
=== Casting

Casting is the conversion of one type to another. Implicit casts are casts that
occur automatically, such as during an assignment operation. Explicit casts are
casts where you use the casting operator to explicitly convert one type to
another. This is necessary during operations where the cast cannot be inferred.

To cast to a new type, precede the expression by the new type enclosed in
parentheses, for example
`(int)x`.

The following sections specify the implicit casts that can be performed and the
explicit casts that are allowed. The only other permitted cast is casting
a single character `String` to a `char`.

*Grammar:*
[source,ANTLR4]
----
cast: '(' TYPE ')' expression
----

[[numeric-casting]]
==== Numeric Casting

The following table shows the allowed implicit and explicit casts between
numeric types. Read the table by row. To find out if you need to explicitly
cast from type A to type B, find the row for type A and scan across to the
column for type B.

IMPORTANT: Explicit casts between numeric types can result in some data loss. A
smaller numeric type cannot necessarily accommodate the value from a larger
numeric type. You might also lose precision when casting from integer types
to floating point types.

|====
|        | byte     | short    | char     | int      | long     | float    | double
| byte   |          | implicit | implicit | implicit | implicit | implicit | implicit
| short  | explicit |          | explicit | implicit | implicit | implicit | implicit
| char   | explicit | explicit |          | implicit | implicit | implicit | implicit
| int    | explicit | explicit | explicit |          | implicit | implicit | implicit
| long   | explicit | explicit | explicit | explicit |          | implicit | implicit
| float  | explicit | explicit | explicit | explicit | explicit |          | implicit
| double | explicit | explicit | explicit | explicit | explicit | explicit |
|====

*Examples:*
[source,Java]
----
int a = 1;            // Declare int variable a and set it to the literal
                      // value 1
long b = a;           // Declare long variable b and set it to int variable
                      // a with an implicit cast to convert from int to long
short c = (short)b;   // Declare short variable c, explicitly cast b to a
                      // short, and assign b to c
byte d = a;           // ERROR: Casting an int to a byte requires an explicit
                      // cast
double e = (double)a; // Explicitly cast int variable a to a double and assign
                      // it to the double variable e. The explicit cast is
                      // allowed, but it is not necessary.
----

[[reference-casting]]
==== Reference Casting

A reference type can be implicitly cast to another reference type as long as
the type being cast _from_ is a descendant of the type being cast _to_. A
reference type can be explicitly cast _to_ if the type being cast to is a
descendant of the type being cast _from_.

*Examples:*
[source,Java]
----
List x;                        // Declare List variable x
ArrayList y = new ArrayList(); // Declare ArrayList variable y and assign it a
                               // newly allocated ArrayList [1]
x = y;                         // Assign ArrayList y to List x using an
                               // implicit cast
y = (ArrayList)x;              // Explicitly cast List x to an ArrayList and
                               // assign it to ArrayList y
x = (List)y;                   // Set List x to ArrayList y using an explicit
                               // cast (the explicit cast is not necessary)
y = x;                         // ERROR: List x cannot be implicitly cast to
                               // an ArrayList, an explicit cast is required
Map m = y;                     // ERROR: Cannot implicitly or explicitly cast [2]
                               // an ArrayList to a Map, no relationship
                               // exists between the two types.
----
[1] `ArrayList` is a descendant of the `List` type.
[2] `Map` is unrelated to the `List` and `ArrayList` types.

[[def-type-casting]]
==== def Type Casting
All primitive and reference types can always be implicitly cast to
`def`. While it is possible to explicitly cast to `def`, it is not necessary.

However, it is not always possible to implicitly cast a `def` to other
primitive and reference types. An explicit cast is required if an explicit
cast would normally be required between the non-def types.

*Examples:*
[source,Java]
----
def x;          // Declare def variable x and set it to null
x = 3;          // Set the def variable x to the literal 3 with an implicit
                // cast from int to def
double a = x;   // Declare double variable a and set it to def variable x,
                // which contains a double
int b = x;      // ERROR: Results in a run-time error because an explicit cast is
                // required to cast from a double to an int
int c = (int)x; // Declare int variable c, explicitly cast def variable x to an
                // int, and assign x to c
----

[[boxing-unboxing]]
==== Boxing and Unboxing

Boxing is where a cast is used to convert a primitive type to its corresponding
reference type. Unboxing is the reverse, converting a reference type to the
corresponding primitive type.

There are two places Painless performs implicit boxing and unboxing:

* When you call methods, Painless automatically boxes and unboxes arguments
so you can specify either primitive types or their corresponding reference
types.
* When you use the `def` type, Painless automatically boxes and unboxes as
needed when converting to and from `def`.

The casting operator does not support any way to explicitly box a primitive
type or unbox a reference type.

If a primitive type needs to be converted to a reference type, the Painless
reference type API supports methods that can do that. However, under normal
circumstances this should not be necessary.

*Examples:*
[source,Java]
----
Integer x = 1;               // ERROR: not a legal implicit cast
Integer y = (Integer)1;      // ERROR: not a legal explicit cast
int a = new Integer(1);      // ERROR: not a legal implicit cast
int b = (int)new Integer(1); // ERROR: not a legal explicit cast
----
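
A primitive can still be boxed or unboxed explicitly through the reference
type API. The following sketch assumes the `Integer.valueOf` and `intValue`
methods are available in the whitelist:

[source,Java]
----
int i = 1;                          // Declare int variable i and set it to the
                                    // literal 1
Integer boxed = Integer.valueOf(i); // Box i by calling the reference type API
                                    // instead of using a cast
int unboxed = boxed.intValue();     // Unbox the Integer back to a primitive int
                                    // by calling the reference type API
----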

[[promotion]]
==== Promotion

Promotion is where certain operations require types to be either a minimum
numerical type or for two (or more) types to be equivalent.
The documentation for each operation that has these requirements
includes promotion tables that describe how this is handled.

When an operation promotes a type or types, the resultant type
of the operation is the promoted type. Types can be promoted to def
at compile-time; however, at run-time, the resultant type will be the
promotion of the types the `def` is representing.

*Examples:*
[source,Java]
----
2 + 2.0    // Add the literal int 2 and the literal double 2.0. The literal
           // 2 is promoted to a double and the resulting value is a double.

def x = 1; // Declare def variable x and set it to the literal int 1 through
           // an implicit cast
x + 2.0F   // Add def variable x and the literal float 2.0.
           // At compile-time the types are promoted to def.
           // At run-time the types are promoted to float.
----
@@ -0,0 +1,51 @@
[[painless-comments]]
=== Comments

Painless supports both single-line and multi-line comments. Comments can be
included anywhere within a script. Use the `//` token anywhere on a line to
specify a single-line comment. All characters from the `//` token to the end
of the line are ignored. Use an opening `/*` token and a closing `*/` token
to specify a multi-line comment. Multi-line comments can start anywhere on a
line, and all characters in between the `/*` token and `*/` token are ignored.

*Grammar*
[source,ANTLR4]
----
SINGLE_LINE_COMMENT: '//' .*? [\n\r];
MULTI_LINE_COMMENT: '/*' .*? '*/';
----

*Examples*

Single-line comments.

[source,Painless]
----
// single-line comment

int value; // single-line comment
----

Multi-line comments.

[source,Painless]
----
/* multi-
   line
   comment */

int value; /* multi-
              line
              comment */ value = 0;

int value; /* multi-line
              comment */

/* multi-line
   comment */ int value;

int value; /* multi-line
              comment */ value = 0;

int value; /* multi-line comment */ value = 0;
----
@@ -2,7 +2,7 @@ _Painless_ is a simple, secure scripting language designed specifically for use
with Elasticsearch. It is the default scripting language for Elasticsearch and
can safely be used for inline and stored scripts. For a detailed description of
the Painless syntax and language features, see the
{painless}/painless-specification.html[Painless Language Specification].
{painless}/painless-lang-spec.html[Painless Language Specification].

[[painless-features]]
You can use Painless anywhere scripts can be used in Elasticsearch. Painless
@@ -1,7 +1,6 @@
[[painless-syntax]]
=== Painless Syntax
[[painless-general-syntax]]
=== General Syntax

[float]
[[control-flow]]
==== Control flow

@@ -17,7 +16,6 @@ for (item : list) {
}
---------------------------------------------------------

[float]
[[functions]]
==== Functions

@@ -32,7 +30,6 @@ if (isNegative(someVar)) {
}
---------------------------------------------------------

[float]
[[lambda-expressions]]
==== Lambda expressions
Lambda expressions and method references work the same as in https://docs.oracle.com/javase/tutorial/java/javaOO/lambdaexpressions.html[Java].
@@ -49,7 +46,6 @@ list.sort(Integer::compare);
You can make method references to functions within the script with `this`,
for example `list.sort(this::mycompare)`.

[float]
[[patterns]]
==== Patterns

@@ -62,7 +58,6 @@ are always constants and compiled efficiently a single time.
Pattern p = /[aeiou]/
---------------------------------------------------------

[float]
[[pattern-flags]]
===== Pattern flags

@@ -84,34 +79,3 @@ Pattern class] using these characters:
|`u` | UNICODE_CASE | `'Ɛ' ==~ /ɛ/iu`
|`x` | COMMENTS (aka extended) | `'a' ==~ /a #comment/x`
|=======================================================================

[float]
[[painless-deref]]
==== Dereferences

Like lots of languages, Painless uses `.` to reference fields and call methods:

[source,painless]
---------------------------------------------------------
String foo = 'foo';
TypeWithGetterOrPublicField bar = new TypeWithGetterOrPublicField()
return foo.length() + bar.x
---------------------------------------------------------

Like Groovy, Painless uses `?.` to perform null-safe references, with the
result being `null` if the left hand side is `null`:

[source,painless]
---------------------------------------------------------
String foo = null;
return foo?.length() // Returns null
---------------------------------------------------------

Unlike Groovy, Painless doesn't support writing to `null` values with this
operator:

[source,painless]
---------------------------------------------------------
TypeWithSetterOrPublicField foo = null;
foo?.x = 'bar' // Compile error
---------------------------------------------------------
@@ -0,0 +1,13 @@
[[painless-keywords]]
=== Keywords

The keywords in the table below are reserved for built-in language
features. These keywords cannot be used as <<identifiers, identifiers>> or
<<painless-types, types>>. For a concrete illustration, see the sketch after
the table.

[cols="^1,^1,^1,^1,^1"]
|====
| if | else | while | do | for
| in | continue | break | return | new
| try | catch | throw | this | instanceof
|====
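
For example, this sketch (a hypothetical script, not from the source) fails to
compile because the keyword `if` is used as an identifier:

[source,Painless]
----
int if = 1; // ERROR: 'if' is a reserved keyword and cannot be
            // used as an identifier
----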
@@ -1,73 +1,34 @@
[[painless-specification]]
[[painless-lang-spec]]
== Painless Language Specification

Painless uses a Java-style syntax that is similar to Groovy. In fact, most
Painless scripts are also valid Groovy, and simple Groovy scripts are typically
valid Painless. This specification assumes you have at least a passing
familiarity with Java and related languages.

Painless is essentially a subset of Java with some additional scripting
language features that make scripts easier to write. However, there are some
important differences, particularly with the casting model. For more detailed
Painless is a scripting language designed for security and performance.
Painless syntax is similar to Java syntax along with some additional
features such as dynamic typing, Map and List accessor shortcuts, and array
initializers. As a direct comparison to Java, there are some important
differences, especially related to the casting model. For more detailed
conceptual information about the basic constructs that Java and Painless share,
refer to the corresponding topics in the
https://docs.oracle.com/javase/specs/jls/se8/html/index.html[Java Language
Specification].

Painless scripts are parsed and compiled using the http://www.antlr.org/[ANTLR4]
and http://asm.ow2.org/[ASM] libraries. Painless scripts are compiled directly
into Java byte code and executed against a standard Java Virtual Machine. This
specification uses ANTLR4 grammar notation to describe the allowed syntax.
and http://asm.ow2.org/[ASM] libraries. Scripts are compiled directly
into Java Virtual Machine (JVM) byte code and executed against a standard JVM.
This specification uses ANTLR4 grammar notation to describe the allowed syntax.
However, the actual Painless grammar is more compact than what is shown here.

[float]
[[comments]]
==== Comments
include::painless-comments.asciidoc[]

Painless supports both single-line and multi-line comments. You can include
comments anywhere within a script.

Single-line comments are preceded by two slashes: `// comment`. They can be
placed anywhere on a line. All characters from the two slashes to the end of
the line are ignored.

Multi-line comments are preceded by a slash-star `/*` and closed by
star-slash `*/`. Multi-line comments can start anywhere on a line. All
characters from the opening `/*` to the closing `*/` are ignored.

*Examples:*

[source,Java]
----
// single-line comment

<code> // single-line comment

/* multi-
   line
   comment */

<code> /* multi-line
          comment */ <code>

<code> /* multi-line comment */ <code>
----

[float]
[[keywords]]
==== Keywords

Painless reserves the following keywords for built-in language features.
These keywords cannot be used in other contexts, such as identifiers.

[cols="^1,^1,^1,^1,^1"]
|====
| if | else | while | do | for
| in | continue | break | return | new
| try | catch | throw | this | instanceof
|====
include::painless-keywords.asciidoc[]

include::painless-literals.asciidoc[]

include::painless-variables.asciidoc[]

include::painless-types.asciidoc[]

include::painless-casting.asciidoc[]

include::painless-operators.asciidoc[]

include::painless-general-syntax.asciidoc[]
@@ -1,94 +1,143 @@
[[literals]]
[[painless-literals]]
=== Literals

Literals are values that you can specify directly in Painless scripts.
Use literals to specify different types of values directly in a script.

[[integers]]
==== Integers

Specify integer literals in decimal, octal, or hex notation. Use the following
single letter designations to specify the primitive type: `l` for `long`, `f`
for `float`, and `d` for `double`. If not specified, the type defaults to
`int` (with the exception of certain assignments described later).
Use integer literals to specify an integer value in decimal, octal, or hex
notation of the <<primitive-types, primitive types>> `int`, `long`, `float`,
or `double`. Use the following single letter designations to specify the
<<primitive-types, primitive type>>: `l` or `L` for `long`, `f` or `F` for
`float`, and `d` or `D` for `double`. If not specified, the type defaults to
`int`. Use `0` as a prefix to specify an integer literal as octal, and use
`0x` or `0X` as a prefix to specify an integer literal as hex.

*Grammar:*
*Grammar*
[source,ANTLR4]
----
INTEGER: '-'? ( '0' | [1-9] [0-9]* ) [lLfFdD]?;
OCTAL: '-'? '0' [0-7]+ [lL]?;
HEX: '-'? '0' [xX] [0-9a-fA-F]+ [lL]?;
OCTAL: '-'? '0' [0-7]+ [lL]?;
HEX: '-'? '0' [xX] [0-9a-fA-F]+ [lL]?;
----

*Examples:*
[source,Java]
*Examples*

Integer literals.

[source,Painless]
----
0     // integer literal of 0
0D    // double literal of 0.0
1234L // long literal of 1234
-90F  // float literal of -90.0
-022  // integer literal of -18 specified in octal
0xF2A // integer literal of 3882
0 <1>
0D <2>
1234L <3>
-90f <4>
-022 <5>
0xF2A <6>
----

[[floating-point-values]]
==== Floating Point Values
<1> `int 0`
<2> `double 0.0`
<3> `long 1234`
<4> `float -90.0`
<5> `int -18` in octal
<6> `int 3882` in hex

Specify floating point literals using the following single letter designations
for the primitive type: `f` for `float` and `d` for `double`.
If not specified, the type defaults to `double`.
[[floats]]
==== Floats

*Grammar:*
Use floating point literals to specify a floating point value of the
<<primitive-types, primitive types>> `float` or `double`. Use the following
single letter designations to specify the <<primitive-types, primitive type>>:
`f` or `F` for `float` and `d` or `D` for `double`. If not specified, the type defaults
to `double`.

*Grammar*
[source,ANTLR4]
----
DECIMAL: '-'? ( '0' | [1-9] [0-9]* ) (DOT [0-9]+)? ( [eE] [+\-]? [0-9]+ )? [fFdD]?;
DECIMAL: '-'? ( '0' | [1-9] [0-9]* ) (DOT [0-9]+)? EXPONENT? [fFdD]?;
EXPONENT: ( [eE] [+\-]? [0-9]+ );
----

*Examples:*
[source,Java]
*Examples*

Floating point literals.

[source,Painless]
----
0.0      // double value of 0.0
1E6      // double value of 1000000
0.977777 // double value of 0.977777
-126.34  // double value of -126.34
89.9F    // float value of 89.9
0.0 <1>
1E6 <2>
0.977777 <3>
-126.34 <4>
89.9F <5>
----

<1> `double 0.0`
<2> `double 1000000.0` in exponent notation
<3> `double 0.977777`
<4> `double -126.34`
<5> `float 89.9`

[[strings]]
==== Strings

Specify literal string with either single or double quotes. In double-quoted
literal strings, you can escape double-quotes with a backslash to include them
in the string. Similarly, you escape single quotes with a backslash in
single-quoted literal strings. Backslashes themselves also need to be
escaped with a backslash.
Use string literals to specify string values of the
<<string-type, String type>> with either single-quotes or double-quotes.
Use a `\"` token to include a double-quote as part of a double-quoted string
literal. Use a `\'` token to include a single-quote as part of a single-quoted
string literal. Use a `\\` token to include a backslash as part of any string
literal.

*Grammar:*
*Grammar*
[source,ANTLR4]
----
STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' ) | ( '\'' ( '\\\'' | '\\\\' | ~[\\'] )*? '\'' );
STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' )
      | ( '\'' ( '\\\'' | '\\\\' | ~[\\'] )*? '\'' );
----

*Examples:*
[source,Java]
*Examples*

String literals using single-quotes.

[source,Painless]
----
"double-quoted String literal"
'single-quoted String literal'
"\"double-quoted String with escaped double-quotes\" and backslash: \\"
'\'single-quoted String with escaped single-quotes\' and backslash \\'
"double-quoted String with non-escaped 'single-quotes'"
'single-quoted String with non-escaped "double-quotes"'
'single-quoted string literal'
'\'single-quoted string with escaped single-quotes\' and backslash \\'
'single-quoted string with non-escaped "double-quotes"'
----

[[char]]
===== Char
String literals using double-quotes.

You cannot directly specify character literals in Painless. However, you can
cast single-character strings to char. Attempting to cast a multi-character
string to a char throws an error.
[source,Painless]
----
"double-quoted string literal"
"\"double-quoted string with escaped double-quotes\" and backslash: \\"
"double-quoted string with non-escaped 'single-quotes'"
----

*Examples:*
[source,Java]
[[characters]]
==== Characters

Use the <<painless-casting, casting operator>> to convert string literals or
<<string-type, String>> values into <<primitive-types, char>> values.
<<string-type, String>> values converted into
<<primitive-types, char>> values must be exactly one character in length
or an error will occur.

*Examples*

Casting string literals into <<primitive-types, char>> values.

[source,Painless]
----
(char)"C"
(char)'c'
----
----

Casting a <<string-type, String>> value into a <<primitive-types, char>> value.

[source,Painless]
----
String s = "s";
char c = (char)s;
----
@@ -1,3 +1,4 @@
[[painless-operators]]
=== Operators

The following is a table of the available operators in Painless. Each operator will have further information and examples outside of the table. Many operators will have a promotion table as described by the documentation on promotion [MARK].
@@ -1,5 +1,5 @@
[[types]]
=== Data Types
[[painless-types]]
=== Types

Painless supports both dynamic and static types. Static types are split into
_primitive types_ and _reference types_.
@@ -267,176 +267,3 @@ def[] da = new def[] {i, l, f*d, s}; // Declare def array da and set it to
                                     // a def array with a size of 4 and the
                                     // values i, l, f*d, and s
----

[[casting]]
=== Casting

Casting is the conversion of one type to another. Implicit casts are casts that
occur automatically, such as during an assignment operation. Explicit casts are
casts where you use the casting operator to explicitly convert one type to
another. This is necessary during operations where the cast cannot be inferred.

To cast to a new type, precede the expression by the new type enclosed in
parentheses, for example
`(int)x`.

The following sections specify the implicit casts that can be performed and the
explicit casts that are allowed. The only other permitted cast is casting
a single character `String` to a `char`.

*Grammar:*
[source,ANTLR4]
----
cast: '(' TYPE ')' expression
----

[[numeric-casting]]
==== Numeric Casting

The following table shows the allowed implicit and explicit casts between
numeric types. Read the table by row. To find out if you need to explicitly
cast from type A to type B, find the row for type A and scan across to the
column for type B.

IMPORTANT: Explicit casts between numeric types can result in some data loss. A
smaller numeric type cannot necessarily accommodate the value from a larger
numeric type. You might also lose precision when casting from integer types
to floating point types.

|====
|        | byte     | short    | char     | int      | long     | float    | double
| byte   |          | implicit | implicit | implicit | implicit | implicit | implicit
| short  | explicit |          | explicit | implicit | implicit | implicit | implicit
| char   | explicit | explicit |          | implicit | implicit | implicit | implicit
| int    | explicit | explicit | explicit |          | implicit | implicit | implicit
| long   | explicit | explicit | explicit | explicit |          | implicit | implicit
| float  | explicit | explicit | explicit | explicit | explicit |          | implicit
| double | explicit | explicit | explicit | explicit | explicit | explicit |
|====

*Examples:*
[source,Java]
----
int a = 1;            // Declare int variable a and set it to the literal
                      // value 1
long b = a;           // Declare long variable b and set it to int variable
                      // a with an implicit cast to convert from int to long
short c = (short)b;   // Declare short variable c, explicitly cast b to a
                      // short, and assign b to c
byte d = a;           // ERROR: Casting an int to a byte requires an explicit
                      // cast
double e = (double)a; // Explicitly cast int variable a to a double and assign
                      // it to the double variable e. The explicit cast is
                      // allowed, but it is not necessary.
----

[[reference-casting]]
==== Reference Casting

A reference type can be implicitly cast to another reference type as long as
the type being cast _from_ is a descendant of the type being cast _to_. A
reference type can be explicitly cast _to_ if the type being cast to is a
descendant of the type being cast _from_.

*Examples:*
[source,Java]
----
List x;                        // Declare List variable x
ArrayList y = new ArrayList(); // Declare ArrayList variable y and assign it a
                               // newly allocated ArrayList [1]
x = y;                         // Assign ArrayList y to List x using an
                               // implicit cast
y = (ArrayList)x;              // Explicitly cast List x to an ArrayList and
                               // assign it to ArrayList y
x = (List)y;                   // Set List x to ArrayList y using an explicit
                               // cast (the explicit cast is not necessary)
y = x;                         // ERROR: List x cannot be implicitly cast to
                               // an ArrayList, an explicit cast is required
Map m = y;                     // ERROR: Cannot implicitly or explicitly cast [2]
                               // an ArrayList to a Map, no relationship
                               // exists between the two types.
----
[1] `ArrayList` is a descendant of the `List` type.
[2] `Map` is unrelated to the `List` and `ArrayList` types.

[[def-type-casting]]
==== def Type Casting
All primitive and reference types can always be implicitly cast to
`def`. While it is possible to explicitly cast to `def`, it is not necessary.

However, it is not always possible to implicitly cast a `def` to other
primitive and reference types. An explicit cast is required if an explicit
cast would normally be required between the non-def types.

*Examples:*
[source,Java]
----
def x;          // Declare def variable x and set it to null
x = 3;          // Set the def variable x to the literal 3 with an implicit
                // cast from int to def
double a = x;   // Declare double variable a and set it to def variable x,
                // which contains a double
int b = x;      // ERROR: Results in a run-time error because an explicit cast is
                // required to cast from a double to an int
int c = (int)x; // Declare int variable c, explicitly cast def variable x to an
                // int, and assign x to c
----

[[boxing-unboxing]]
==== Boxing and Unboxing

Boxing is where a cast is used to convert a primitive type to its corresponding
reference type. Unboxing is the reverse, converting a reference type to the
corresponding primitive type.

There are two places Painless performs implicit boxing and unboxing:

* When you call methods, Painless automatically boxes and unboxes arguments
so you can specify either primitive types or their corresponding reference
types.
* When you use the `def` type, Painless automatically boxes and unboxes as
needed when converting to and from `def`.

The casting operator does not support any way to explicitly box a primitive
type or unbox a reference type.

If a primitive type needs to be converted to a reference type, the Painless
reference type API supports methods that can do that. However, under normal
circumstances this should not be necessary.

*Examples:*
[source,Java]
----
Integer x = 1;               // ERROR: not a legal implicit cast
Integer y = (Integer)1;      // ERROR: not a legal explicit cast
int a = new Integer(1);      // ERROR: not a legal implicit cast
int b = (int)new Integer(1); // ERROR: not a legal explicit cast
----

[[promotion]]
==== Promotion

Promotion is where certain operations require types to be either a minimum
numerical type or for two (or more) types to be equivalent.
The documentation for each operation that has these requirements
includes promotion tables that describe how this is handled.

When an operation promotes a type or types, the resultant type
of the operation is the promoted type. Types can be promoted to def
at compile-time; however, at run-time, the resultant type will be the
promotion of the types the `def` is representing.

*Examples:*
[source,Java]
----
2 + 2.0    // Add the literal int 2 and the literal double 2.0. The literal
           // 2 is promoted to a double and the resulting value is a double.

def x = 1; // Declare def variable x and set it to the literal int 1 through
           // an implicit cast
x + 2.0F   // Add def variable x and the literal float 2.0.
           // At compile-time the types are promoted to def.
           // At run-time the types are promoted to float.
----
@@ -1,15 +1,15 @@
[[variables]]
[[painless-variables]]
=== Variables

Variables in Painless must be declared and can be statically or <<dynamic-types,
dynamically typed>>.
Variables in Painless must be declared and can be
statically or <<dynamic-types, dynamically typed>>.

[[variable-identifiers]]
==== Variable Identifiers
[[identifiers]]
==== Identifiers

Specify variable identifiers using the following grammar. Variable identifiers
must start with a letter or underscore. You cannot use <<keywords, keywords>> or
<<types, types>> as identifiers.
must start with a letter or underscore. You cannot use
<<painless-keywords, keywords>> or <<painless-types, types>> as identifiers.

*Grammar:*
[source,ANTLR4]
@@ -20,7 +20,6 @@ ID: [_a-zA-Z] [_a-zA-Z-0-9]*;
*Examples:*
[source,Java]
----
_
a
Z
id
@@ -30,8 +29,8 @@ MAP25
_map25
----

[[variable-declaration]]
==== Variable Declaration
[[declaration]]
==== Declaration

Variables must be declared before you use them. The format is `type-name
identifier-name`. To declare multiple variables of the same type, specify a
@@ -56,7 +55,7 @@ int i = 10; // Declare the int variable i and set it to the int literal 10
----

[[variable-assignment]]
==== Variable Assignment
==== Assignment

Use the equals operator (`=`) to assign a value to a variable. The format is
`identifier-name = value`. Any value expression can be assigned to any variable
@@ -80,7 +79,7 @@ int i; // Declare an int i
i = 10; // Set the int i to the int literal 10
----

Immediately assigning a value when declaring a variable.
Immediately assigning a value when declaring a variable.

[source,Java]
----
@@ -22,7 +22,6 @@ node-0 flush 0 0 0
node-0 force_merge 0 0 0
node-0 generic 0 0 0
node-0 get 0 0 0
node-0 index 0 0 0
node-0 listener 0 0 0
node-0 management 1 0 0
node-0 refresh 0 0 0
@@ -52,7 +51,6 @@ flush
force_merge
generic
get
index
listener
management
refresh
@@ -421,3 +421,108 @@ POST _reindex
----
// NOTCONSOLE

[float]
=== Use `include_type_name=false` to prepare for upgrade to 8.0

Index creation, mappings and document APIs support the `include_type_name`
option. When set to `false`, this option enables the behavior that will become
default in 8.0 when types are removed. See some examples of interactions with
Elasticsearch with this option turned off:

[float]
==== Index creation

[source,js]
--------------------------------------------------
PUT index?include_type_name=false
{
  "mappings": {
    "properties": { <1>
      "foo": {
        "type": "keyword"
      }
    }
  }
}
--------------------------------------------------
// CONSOLE
<1> Mappings are included directly under the `mappings` key, without a type name.

[float]
==== PUT and GET mappings

[source,js]
--------------------------------------------------
PUT index

PUT index/_mappings?include_type_name=false
{
  "properties": { <1>
    "foo": {
      "type": "keyword"
    }
  }
}

GET index/_mappings?include_type_name=false
--------------------------------------------------
// CONSOLE
<1> Mappings are included directly under the `mappings` key, without a type name.

The above call returns

[source,js]
--------------------------------------------------
{
  "index": {
    "mappings": {
      "properties": { <1>
        "foo": {
          "type": "keyword"
        }
      }
    }
  }
}
--------------------------------------------------
// TESTRESPONSE
<1> Mappings are included directly under the `mappings` key, without a type name.

[float]
==== Document APIs

Index APIs must be called with the `{index}/_doc` path for automatic generation of
the `_id` and `{index}/_doc/{id}` with explicit ids.

[source,js]
--------------------------------------------------
PUT index/_doc/1?include_type_name=false
{
  "foo": "bar"
}
--------------------------------------------------
// CONSOLE

[source,js]
--------------------------------------------------
{
  "_index": "index", <1>
  "_id": "1",
  "_version": 1,
  "result": "created",
  "_shards": {
    "total": 2,
    "successful": 1,
    "failed": 0
  },
  "_seq_no": 0,
  "_primary_term": 1
}
--------------------------------------------------
// TESTRESPONSE
<1> The response does not include a `_type`.

Likewise the <<docs-index_,GET>>, <<docs-delete,`DELETE`>>,
<<docs-update,`_update`>> and <<search,`_search`>> APIs do not return a `_type`
key in the response when `include_type_name` is set to `false`.
@@ -122,6 +122,11 @@ The following parameters are accepted by `geo_point` fields:
ignored. If `false`, geo-points containing any more than latitude and longitude
(two dimensions) values throw an exception and reject the whole document.

<<null-value,`null_value`>>::

Accepts a geopoint value which is substituted for any explicit `null` values.
Defaults to `null`, which means the field is treated as missing.
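
As a sketch, a mapping that substitutes a default point for explicit `null`
values might look like this (the `example` index and `location` field names
are illustrative, not from the source):

[source,js]
--------------------------------------------------
PUT example
{
  "mappings": {
    "_doc": {
      "properties": {
        "location": {
          "type": "geo_point",
          "null_value": [0, 0] <1>
        }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE
<1> Documents with an explicit `null` for `location` are indexed as if the
value were the point `[0, 0]` (`lon, lat`).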

==== Using geo-points in scripts

When accessing the value of a geo-point in a script, the value is returned as
@@ -5,4 +5,12 @@
==== Percolator

* The deprecated `index.percolator.map_unmapped_fields_as_string` setting has been removed in favour of
the `index.percolator.map_unmapped_fields_as_text` setting.
the `index.percolator.map_unmapped_fields_as_text` setting.

==== Index thread pool

* Internally, single-document index/delete/update requests are executed as bulk
requests with a single-document payload. This means that these requests are
executed on the bulk thread pool. As such, the indexing thread pool is no
longer needed and has been removed. Consequently, the settings
`thread_pool.index.size` and `thread_pool.index.queue_size` have been removed.
@@ -13,12 +13,6 @@ There are several thread pools, but the important ones include:
For generic operations (e.g., background node discovery).
Thread pool type is `scaling`.

`index`::
For index/delete operations. Thread pool type is `fixed`
with a size of `# of available processors`,
queue_size of `200`. The maximum size for this pool
is `1 + # of available processors`.

`search`::
For count/search/suggest operations. Thread pool type is
`fixed_auto_queue_size` with a size of
@@ -55,13 +49,13 @@ There are several thread pools, but the important ones include:
Mainly for Java clients executing actions when the listener is threaded.
Thread pool type is `scaling` with a default max of `min(10, (# of available processors)/2)`.

Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `index`
Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `bulk`
thread pool to have more threads:

[source,yaml]
--------------------------------------------------
thread_pool:
    index:
    bulk:
        size: 30
--------------------------------------------------

@@ -89,7 +83,7 @@ full, it will abort the request.

[source,yaml]
--------------------------------------------------
thread_pool:
    index:
    bulk:
        size: 30
        queue_size: 1000
--------------------------------------------------
@@ -489,7 +489,7 @@ Using `_index` in scripts has been replaced with writing `ScriptEngine` backends
=== Painless Syntax

See the
{painless}/painless-specification.html[Painless Language Specification]
{painless}/painless-lang-spec.html[Painless Language Specification]
in the guide to the {painless}/index.html[Painless Scripting Language].

[role="exclude",id="modules-scripting-painless-debugging"]
@@ -34,6 +34,7 @@ subprojects {
        Project depProject = dependencyToProject(dep)
        if (depProject != null
                && false == depProject.path.equals(':libs:elasticsearch-core')
                && false == isEclipse
                && depProject.path.startsWith(':libs')) {
            throw new InvalidUserDataException("projects in :libs "
                    + "may not depend on other projects libs except "
@@ -687,8 +687,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
     * Tests recovery of an index with or without a translog and the
     * statistics we gather about that.
     */
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29544")
    public void testRecovery() throws IOException {
    public void testRecovery() throws Exception {
        int count;
        boolean shouldHaveTranslog;
        if (runningAgainstOldCluster) {
@@ -701,7 +700,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
        indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject());

        // make sure all recoveries are done
        ensureNoInitializingShards();
        ensureGreen(index);
        // Explicitly flush so we're sure to have a bunch of documents in the Lucene index
        client().performRequest("POST", "/_flush");
        if (shouldHaveTranslog) {
@@ -33,7 +33,7 @@

  - do:
      cat.thread_pool:
        thread_pool_patterns: bulk,management,flush,index,generic,force_merge
        thread_pool_patterns: bulk,management,flush,generic,force_merge
        h: id,name,active
        v: true

@@ -44,7 +44,6 @@
                \S+\s+ flush \s+ \d+ \n
                \S+\s+ force_merge \s+ \d+ \n
                \S+\s+ generic \s+ \d+ \n
                \S+\s+ index \s+ \d+ \n
                \S+\s+ management \s+ \d+ \n)+ $/

  - do:
@@ -72,12 +71,11 @@

  - do:
      cat.thread_pool:
        thread_pool_patterns: bulk,index,search
        thread_pool_patterns: bulk,search
        size: ""

  - match:
      $body: |
               / #node_name name active queue rejected
                 ^ (\S+ \s+ bulk \s+ \d+ \s+ \d+ \s+ \d+ \n
                    \S+ \s+ index \s+ \d+ \s+ \d+ \s+ \d+ \n
                    \S+ \s+ search \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/
@@ -117,6 +117,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_5_6_8 = new Version(V_5_6_8_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
     public static final int V_5_6_9_ID = 5060999;
     public static final Version V_5_6_9 = new Version(V_5_6_9_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
+    public static final int V_5_6_10_ID = 5061099;
+    public static final Version V_5_6_10 = new Version(V_5_6_10_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
     public static final int V_6_0_0_alpha1_ID = 6000001;
     public static final Version V_6_0_0_alpha1 =
         new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);

@@ -161,6 +163,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_6_2_3 = new Version(V_6_2_3_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);
     public static final int V_6_2_4_ID = 6020499;
     public static final Version V_6_2_4 = new Version(V_6_2_4_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);
+    public static final int V_6_2_5_ID = 6020599;
+    public static final Version V_6_2_5 = new Version(V_6_2_5_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);
     public static final int V_6_3_0_ID = 6030099;
     public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_0);
     public static final int V_7_0_0_alpha1_ID = 7000001;

@@ -183,6 +187,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
                 return V_7_0_0_alpha1;
             case V_6_3_0_ID:
                 return V_6_3_0;
+            case V_6_2_5_ID:
+                return V_6_2_5;
             case V_6_2_4_ID:
                 return V_6_2_4;
             case V_6_2_3_ID:

@@ -219,6 +225,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
                 return V_6_0_0_alpha2;
             case V_6_0_0_alpha1_ID:
                 return V_6_0_0_alpha1;
+            case V_5_6_10_ID:
+                return V_5_6_10;
            case V_5_6_9_ID:
                 return V_5_6_9;
             case V_5_6_8_ID:

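Note: the numeric IDs above pack major, minor, revision, and a build byte into one integer, which is what keeps the fromId switch and version comparisons cheap; the trailing 99 marks a released build. A minimal decoding sketch (not part of the change set):

    int id = 5061099;                 // V_5_6_10_ID
    int major = id / 1_000_000;       // 5
    int minor = (id / 10_000) % 100;  // 6
    int revision = (id / 100) % 100;  // 10
    int build = id % 100;             // 99 = released build
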
@@ -58,7 +58,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
         PARSER.declareObject((r, t) -> r.transientSettings = t, (p, c) -> Settings.fromXContent(p), TRANSIENT);
     }

-    private boolean flatSettings = false;
     private Settings transientSettings = EMPTY_SETTINGS;
     private Settings persistentSettings = EMPTY_SETTINGS;

@@ -74,29 +73,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
         return validationException;
     }

-    /**
-     * Sets the value of "flat_settings".
-     * Used only by the high-level REST client.
-     *
-     * @param flatSettings
-     *            value of "flat_settings" flag to be set
-     * @return this request
-     */
-    public ClusterUpdateSettingsRequest flatSettings(boolean flatSettings) {
-        this.flatSettings = flatSettings;
-        return this;
-    }
-
-    /**
-     * Return settings in flat format.
-     * Used only by the high-level REST client.
-     *
-     * @return <code>true</code> if settings need to be returned in flat format; <code>false</code> otherwise.
-     */
-    public boolean flatSettings() {
-        return flatSettings;
-    }
-
     public Settings transientSettings() {
         return transientSettings;
     }

@@ -66,7 +66,6 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
     private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS };
     private Feature[] features = DEFAULT_FEATURES;
     private boolean humanReadable = false;
-    private transient boolean flatSettings = false;
     private transient boolean includeDefaults = false;

     public GetIndexRequest() {

@@ -118,28 +117,6 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
         return humanReadable;
     }

-    /**
-     * Sets the value of "flat_settings".
-     * Used only by the high-level REST client.
-     *
-     * @param flatSettings value of "flat_settings" flag to be set
-     * @return this request
-     */
-    public GetIndexRequest flatSettings(boolean flatSettings) {
-        this.flatSettings = flatSettings;
-        return this;
-    }
-
-    /**
-     * Return settings in flat format.
-     * Used only by the high-level REST client.
-     *
-     * @return <code>true</code> if settings need to be returned in flat format; <code>false</code> otherwise.
-     */
-    public boolean flatSettings() {
-        return flatSettings;
-    }
-
     /**
      * Sets the value of "include_defaults".
      * Used only by the high-level REST client.

@@ -55,7 +55,6 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
     private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
     private Settings settings = EMPTY_SETTINGS;
     private boolean preserveExisting = false;
-    private boolean flatSettings = false;

     public UpdateSettingsRequest() {
     }

@@ -75,29 +74,6 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
         this.settings = settings;
     }

-    /**
-     * Sets the value of "flat_settings".
-     * Used only by the high-level REST client.
-     *
-     * @param flatSettings
-     *            value of "flat_settings" flag to be set
-     * @return this request
-     */
-    public UpdateSettingsRequest flatSettings(boolean flatSettings) {
-        this.flatSettings = flatSettings;
-        return this;
-    }
-
-    /**
-     * Return settings in flat format.
-     * Used only by the high-level REST client.
-     *
-     * @return <code>true</code> if settings need to be returned in flat format; <code>false</code> otherwise.
-     */
-    public boolean flatSettings() {
-        return flatSettings;
-    }
-
     @Override
     public ActionRequestValidationException validate() {
         ActionRequestValidationException validationException = null;

@@ -46,7 +46,7 @@ public class TransportDeleteAction extends TransportSingleItemBulkWriteAction<De
                                  ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                  TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) {
         super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
-            actionFilters, indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX,
+            actionFilters, indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.BULK,
             bulkAction, shardBulkAction);
     }

@@ -54,7 +54,7 @@ public class TransportIndexAction extends TransportSingleItemBulkWriteAction<Ind
                                 ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                 TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) {
         super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
-            actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX,
+            actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.BULK,
             bulkAction, shardBulkAction);
     }

@@ -86,7 +86,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio

     @Override
     protected String executor() {
-        return ThreadPool.Names.INDEX;
+        return ThreadPool.Names.BULK;
     }

     @Override

@@ -19,11 +19,9 @@

 package org.elasticsearch.cluster.metadata;

-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ResourceAlreadyExistsException;
 import org.elasticsearch.Version;

@@ -57,7 +55,6 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;

@@ -78,12 +75,11 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;

-import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.nio.file.Path;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;

@@ -475,9 +471,11 @@ public class MetaDataCreateIndexService extends AbstractComponent {

             // now, update the mappings with the actual source
             Map<String, MappingMetaData> mappingsMetaData = new HashMap<>();
-            for (DocumentMapper mapper : mapperService.docMappers(true)) {
-                MappingMetaData mappingMd = new MappingMetaData(mapper);
-                mappingsMetaData.put(mapper.type(), mappingMd);
+            for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) {
+                if (mapper != null) {
+                    MappingMetaData mappingMd = new MappingMetaData(mapper);
+                    mappingsMetaData.put(mapper.type(), mappingMd);
+                }
             }

             final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index())

@@ -19,9 +19,7 @@

 package org.elasticsearch.cluster.metadata;

-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;

@@ -49,6 +47,7 @@ import org.elasticsearch.indices.InvalidTypeNameException;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;

@@ -175,10 +174,13 @@ public class MetaDataMappingService extends AbstractComponent {
             String index = indexService.index().getName();
             try {
                 List<String> updatedTypes = new ArrayList<>();
-                for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
-                    final String type = mapper.type();
-                    if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
-                        updatedTypes.add(type);
+                MapperService mapperService = indexService.mapperService();
+                for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) {
+                    if (mapper != null) {
+                        final String type = mapper.type();
+                        if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
+                            updatedTypes.add(type);
+                        }
                     }
                 }

@@ -186,8 +188,10 @@ public class MetaDataMappingService extends AbstractComponent {
                 if (updatedTypes.isEmpty() == false) {
                     logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes);
                     dirty = true;
-                    for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
-                        builder.putMapping(new MappingMetaData(mapper));
+                    for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) {
+                        if (mapper != null) {
+                            builder.putMapping(new MappingMetaData(mapper));
+                        }
                     }
                 }
             } catch (Exception e) {

@@ -320,8 +324,10 @@ public class MetaDataMappingService extends AbstractComponent {
             IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
             // Mapping updates on a single type may have side-effects on other types so we need to
             // update mapping metadata on all types
-            for (DocumentMapper mapper : mapperService.docMappers(true)) {
-                indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource()));
+            for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) {
+                if (mapper != null) {
+                    indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource()));
+                }
             }
             builder.put(indexMetaDataBuilder);
         }

@@ -24,9 +24,14 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
 import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
 import org.apache.lucene.util.SloppyMath;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.index.fielddata.FieldData;
 import org.elasticsearch.index.fielddata.GeoPointValues;

@@ -36,6 +41,7 @@ import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
 import org.elasticsearch.index.fielddata.SortingNumericDoubleValues;

 import java.io.IOException;
+import java.io.InputStream;

 public class GeoUtils {

@@ -351,6 +357,36 @@ public class GeoUtils {
         return parseGeoPoint(parser, point, false);
     }

+    /**
+     * Parses the value as a geopoint. The following types of values are supported:
+     * <p>
+     * Object: has to contain either lat and lon or geohash fields
+     * <p>
+     * String: expected to be in "latitude, longitude" format or a geohash
+     * <p>
+     * Array: two or more elements, the first element is longitude, the second is latitude, the rest is ignored if ignoreZValue is true
+     */
+    public static GeoPoint parseGeoPoint(Object value, final boolean ignoreZValue) throws ElasticsearchParseException {
+        try {
+            XContentBuilder content = JsonXContent.contentBuilder();
+            content.startObject();
+            content.field("null_value", value);
+            content.endObject();
+
+            try (InputStream stream = BytesReference.bytes(content).streamInput();
+                     XContentParser parser = JsonXContent.jsonXContent.createParser(
+                         NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) {
+                parser.nextToken(); // start object
+                parser.nextToken(); // field name
+                parser.nextToken(); // field value
+                return parseGeoPoint(parser, new GeoPoint(), ignoreZValue);
+            }
+
+        } catch (IOException ex) {
+            throw new ElasticsearchParseException("error parsing geopoint", ex);
+        }
+    }
+
     /**
      * Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms:
     *

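Note: the new parseGeoPoint(Object, boolean) overload wraps the raw value in a synthetic "null_value" JSON field so that objects, strings, geohashes, and arrays all reuse the existing parser-based path. A hedged usage sketch (the sample values are illustrative, not from this change set):

    GeoPoint p1 = GeoUtils.parseGeoPoint("41.12,-71.34", false);              // "lat, lon" string
    GeoPoint p2 = GeoUtils.parseGeoPoint("drm3btev3e86", false);              // geohash string
    GeoPoint p3 = GeoUtils.parseGeoPoint(Arrays.asList(-71.34, 41.12), true); // [lon, lat], extra dims ignored
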
@@ -697,8 +697,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
         if (indexSettings.getTranslogDurability() == Translog.Durability.ASYNC) {
             for (IndexShard shard : this.shards.values()) {
                 try {
-                    Translog translog = shard.getTranslog();
-                    if (translog.syncNeeded()) {
+                    if (shard.isSyncNeeded()) {
                         shard.sync();
                     }
                 } catch (AlreadyClosedException ex) {

@@ -121,7 +121,8 @@ public final class IndexWarmer extends AbstractComponent {
         public TerminationHandle warmReader(final IndexShard indexShard, final Engine.Searcher searcher) {
             final MapperService mapperService = indexShard.mapperService();
             final Map<String, MappedFieldType> warmUpGlobalOrdinals = new HashMap<>();
-            for (DocumentMapper docMapper : mapperService.docMappers(false)) {
+            DocumentMapper docMapper = mapperService.documentMapper();
+            if (docMapper != null) {
                 for (FieldMapper fieldMapper : docMapper.mappers()) {
                     final MappedFieldType fieldType = fieldMapper.fieldType();
                     final String indexName = fieldType.name();

@@ -233,7 +233,8 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements I
             boolean hasNested = false;
             final Set<Query> warmUp = new HashSet<>();
             final MapperService mapperService = indexShard.mapperService();
-            for (DocumentMapper docMapper : mapperService.docMappers(false)) {
+            DocumentMapper docMapper = mapperService.documentMapper();
+            if (docMapper != null) {
                 if (docMapper.hasNestedObjects()) {
                     hasNested = true;
                     for (ObjectMapper objectMapper : docMapper.objectMappers().values()) {

@@ -66,6 +66,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogStats;

 import java.io.Closeable;
 import java.io.FileNotFoundException;

@@ -510,8 +511,18 @@ public abstract class Engine implements Closeable {
         EXTERNAL, INTERNAL
     }

-    /** returns the translog for this engine */
-    public abstract Translog getTranslog();
+    /**
+     * Returns the translog associated with this engine.
+     * Prefer to keep the translog package-private, so that an engine can control all accesses to the translog.
+     */
+    abstract Translog getTranslog();
+
+    /**
+     * Checks if the underlying storage sync is required.
+     */
+    public boolean isTranslogSyncNeeded() {
+        return getTranslog().syncNeeded();
+    }

     /**
      * Ensures that all locations in the given stream have been written to the underlying storage.

@@ -520,6 +531,36 @@ public abstract class Engine implements Closeable {

     public abstract void syncTranslog() throws IOException;

+    public Closeable acquireTranslogRetentionLock() {
+        return getTranslog().acquireRetentionLock();
+    }
+
+    /**
+     * Creates a new translog snapshot from this engine for reading translog operations whose seq# in the provided range.
+     * The caller has to close the returned snapshot after finishing the reading.
+     */
+    public Translog.Snapshot newTranslogSnapshotBetween(long minSeqNo, long maxSeqNo) throws IOException {
+        return getTranslog().getSnapshotBetween(minSeqNo, maxSeqNo);
+    }
+
+    /**
+     * Returns the estimated number of translog operations in this engine whose seq# at least the provided seq#.
+     */
+    public int estimateTranslogOperationsFromMinSeq(long minSeqNo) {
+        return getTranslog().estimateTotalOperationsFromMinSeq(minSeqNo);
+    }
+
+    public TranslogStats getTranslogStats() {
+        return getTranslog().stats();
+    }
+
+    /**
+     * Returns the last location that the translog of this engine has written into.
+     */
+    public Translog.Location getTranslogLastWriteLocation() {
+        return getTranslog().getLastWriteLocation();
+    }
+
     protected final void ensureOpen(Exception suppressed) {
         if (isClosed.get()) {
             AlreadyClosedException ace = new AlreadyClosedException(shardId + " engine is closed", failedEngine.get());

@@ -548,6 +589,13 @@ public abstract class Engine implements Closeable {
      */
     public abstract LocalCheckpointTracker getLocalCheckpointTracker();

+    /**
+     * Returns the latest global checkpoint value that has been persisted in the underlying storage (i.e. translog's checkpoint)
+     */
+    public long getLastSyncedGlobalCheckpoint() {
+        return getTranslog().getLastSyncedGlobalCheckpoint();
+    }
+
     /**
      * Global stats on segments.
      */

@@ -812,6 +860,16 @@ public abstract class Engine implements Closeable {
      */
     public abstract void trimTranslog() throws EngineException;

+    /**
+     * Tests whether or not the translog generation should be rolled to a new generation.
+     * This test is based on the size of the current generation compared to the configured generation threshold size.
+     *
+     * @return {@code true} if the current generation should be rolled to a new generation
+     */
+    public boolean shouldRollTranslogGeneration() {
+        return getTranslog().shouldRollGeneration();
+    }
+
     /**
      * Rolls the translog generation and cleans unneeded.
     */

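Note: the net effect of this hunk is that the Engine no longer hands out its Translog; callers go through the new wrapper methods, so the engine controls every access. A hedged sketch of the resulting call pattern, assuming an engine reference and a startingSeqNo already in scope:

    if (engine.isTranslogSyncNeeded()) {   // was: engine.getTranslog().syncNeeded()
        engine.syncTranslog();
    }
    int ops = engine.estimateTranslogOperationsFromMinSeq(startingSeqNo);
    try (Translog.Snapshot snapshot = engine.newTranslogSnapshotBetween(startingSeqNo, Long.MAX_VALUE)) {
        // replay snapshot.next() operations ...
    }
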
@@ -426,7 +426,7 @@ public class InternalEngine extends Engine {
     }

     @Override
-    public Translog getTranslog() {
+    Translog getTranslog() {
         ensureOpen();
         return translog;
     }

@@ -631,23 +631,9 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
             return values[index].get().utf8ToString();
         }

-        public BytesRef getBytesValue() {
-            if (size() > 0) {
-                return values[0].get();
-            } else {
-                return null;
-            }
-        }
-
         public String getValue() {
-            BytesRef value = getBytesValue();
-            if (value == null) {
-                return null;
-            } else {
-                return value.utf8ToString();
-            }
+            return count == 0 ? null : get(0);
         }

     }

     public static final class BytesRefs extends BinaryScriptDocValues<BytesRef> {

@@ -658,14 +644,16 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {

         @Override
         public BytesRef get(int index) {
-            return values[index].get();
+            /**
+             * We need to make a copy here because {@link BinaryScriptDocValues} might reuse the
+             * returned value and the same instance might be used to
+             * return values from multiple documents.
+             **/
+            return values[index].toBytesRef();
         }

         public BytesRef getValue() {
-            if (count == 0) {
-                return new BytesRef();
-            }
-            return values[0].get();
+            return count == 0 ? new BytesRef() : get(0);
         }

     }

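Note: Lucene may reuse the buffer backing values[index] across documents, so returning it directly would let a previously returned BytesRef change underneath the caller. A hedged sketch of the aliasing the copy prevents (docValues and otherDoc are illustrative names):

    BytesRef first = docValues.get(0);   // safe after this change: toBytesRef() materializes a copy
    docValues.setNextDocId(otherDoc);    // before the change, advancing could rewrite `first`
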
@@ -60,6 +60,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
     public static class Names {
         public static final String IGNORE_MALFORMED = "ignore_malformed";
         public static final ParseField IGNORE_Z_VALUE = new ParseField("ignore_z_value");
+        public static final String NULL_VALUE = "null_value";
     }

     public static class Defaults {

@@ -134,7 +135,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
                 throws MapperParsingException {
             Builder builder = new GeoPointFieldMapper.Builder(name);
             parseField(builder, name, node, parserContext);
-
+            Object nullValue = null;
             for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                 Map.Entry<String, Object> entry = iterator.next();
                 String propName = entry.getKey();

@@ -147,9 +148,31 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
                     builder.ignoreZValue(XContentMapValues.nodeBooleanValue(propNode,
                         name + "." + Names.IGNORE_Z_VALUE.getPreferredName()));
                     iterator.remove();
+                } else if (propName.equals(Names.NULL_VALUE)) {
+                    if (propNode == null) {
+                        throw new MapperParsingException("Property [null_value] cannot be null.");
+                    }
+                    nullValue = propNode;
+                    iterator.remove();
                 }
             }

+            if (nullValue != null) {
+                boolean ignoreZValue = builder.ignoreZValue == null ? Defaults.IGNORE_Z_VALUE.value() : builder.ignoreZValue;
+                boolean ignoreMalformed = builder.ignoreMalformed == null ? Defaults.IGNORE_MALFORMED.value() : builder.ignoreZValue;
+                GeoPoint point = GeoUtils.parseGeoPoint(nullValue, ignoreZValue);
+                if (ignoreMalformed == false) {
+                    if (point.lat() > 90.0 || point.lat() < -90.0) {
+                        throw new IllegalArgumentException("illegal latitude value [" + point.lat() + "]");
+                    }
+                    if (point.lon() > 180.0 || point.lon() < -180) {
+                        throw new IllegalArgumentException("illegal longitude value [" + point.lon() + "]");
+                    }
+                } else {
+                    GeoUtils.normalizePoint(point);
+                }
+                builder.nullValue(point);
+            }
             return builder;
         }
     }

@@ -318,7 +341,11 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
             }
         } else if (token == XContentParser.Token.VALUE_STRING) {
             parse(context, sparse.resetFromString(context.parser().text(), ignoreZValue.value()));
-        } else if (token != XContentParser.Token.VALUE_NULL) {
+        } else if (token == XContentParser.Token.VALUE_NULL) {
+            if (fieldType.nullValue() != null) {
+                parse(context, (GeoPoint) fieldType.nullValue());
+            }
+        } else {
             try {
                 parse(context, GeoUtils.parseGeoPoint(context.parser(), sparse));
             } catch (ElasticsearchParseException e) {

@@ -337,11 +364,15 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
     protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
         super.doXContentBody(builder, includeDefaults, params);
         if (includeDefaults || ignoreMalformed.explicit()) {
-            builder.field(GeoPointFieldMapper.Names.IGNORE_MALFORMED, ignoreMalformed.value());
+            builder.field(Names.IGNORE_MALFORMED, ignoreMalformed.value());
         }
         if (includeDefaults || ignoreZValue.explicit()) {
             builder.field(Names.IGNORE_Z_VALUE.getPreferredName(), ignoreZValue.value());
         }
+
+        if (includeDefaults || fieldType().nullValue() != null) {
+            builder.field(Names.NULL_VALUE, fieldType().nullValue());
+        }
     }

     public Explicit<Boolean> ignoreZValue() {

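Note: with the parser change above, a geo_point field can declare a null_value that is indexed in place of explicit nulls. A hedged sketch of a mapping that exercises it, built with XContentBuilder (the field names are illustrative):

    XContentBuilder mapping = JsonXContent.contentBuilder()
        .startObject()
            .startObject("properties")
                .startObject("location")
                    .field("type", "geo_point")
                    .field("null_value", "41.12,-71.34") // any parseGeoPoint-compatible value
                .endObject()
            .endObject()
        .endObject();
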
@@ -57,6 +57,7 @@ import org.elasticsearch.indices.mapper.MapperRegistry;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;

@@ -64,13 +65,12 @@ import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.function.Function;
 import java.util.function.Supplier;
-import java.util.stream.Collectors;

 import static java.util.Collections.emptyMap;
-import static java.util.Collections.emptySet;
 import static java.util.Collections.unmodifiableMap;

 public class MapperService extends AbstractIndexComponent implements Closeable {

@@ -121,7 +121,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {

     private volatile String defaultMappingSource;

-    private volatile Map<String, DocumentMapper> mappers = emptyMap();
+    private volatile DocumentMapper mapper;
+    private volatile DocumentMapper defaultMapper;

     private volatile FieldTypeLookup fieldTypes;
     private volatile Map<String, ObjectMapper> fullPathObjectMappers = emptyMap();

@@ -166,24 +167,6 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         return this.hasNested;
     }

-    /**
-     * returns an immutable iterator over current document mappers.
-     *
-     * @param includingDefaultMapping indicates whether the iterator should contain the {@link #DEFAULT_MAPPING} document mapper.
-     *                                As is this not really an active type, you would typically set this to false
-     */
-    public Iterable<DocumentMapper> docMappers(final boolean includingDefaultMapping) {
-        return () -> {
-            final Collection<DocumentMapper> documentMappers;
-            if (includingDefaultMapping) {
-                documentMappers = mappers.values();
-            } else {
-                documentMappers = mappers.values().stream().filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type())).collect(Collectors.toList());
-            }
-            return Collections.unmodifiableCollection(documentMappers).iterator();
-        };
-    }
-
     public IndexAnalyzers getIndexAnalyzers() {
         return this.indexAnalyzers;
     }

@@ -212,7 +195,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     public boolean updateMapping(IndexMetaData indexMetaData) throws IOException {
         assert indexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + indexMetaData.getIndex();
         // go over and add the relevant mappings (or update them)
-        final Set<String> existingMappers = new HashSet<>(mappers.keySet());
+        Set<String> existingMappers = new HashSet<>();
+        if (mapper != null) {
+            existingMappers.add(mapper.type());
+        }
+        if (defaultMapper != null) {
+            existingMappers.add(DEFAULT_MAPPING);
+        }
         final Map<String, DocumentMapper> updatedEntries;
         try {
             // only update entries if needed

@@ -314,29 +303,32 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             defaultMappingSourceOrLastStored = this.defaultMappingSource;
         }

-        List<DocumentMapper> documentMappers = new ArrayList<>();
+        DocumentMapper documentMapper = null;
         for (Map.Entry<String, CompressedXContent> entry : mappings.entrySet()) {
             String type = entry.getKey();
             if (type.equals(DEFAULT_MAPPING)) {
                 continue;
             }

+            if (documentMapper != null) {
+                throw new IllegalArgumentException("Cannot put multiple mappings: " + mappings.keySet());
+            }
+
             final boolean applyDefault =
                 // the default was already applied if we are recovering
                 reason != MergeReason.MAPPING_RECOVERY
                 // only apply the default mapping if we don't have the type yet
-                && mappers.containsKey(type) == false;
+                && this.mapper == null;

             try {
-                DocumentMapper documentMapper =
+                documentMapper =
                     documentParser.parse(type, entry.getValue(), applyDefault ? defaultMappingSourceOrLastStored : null);
-                documentMappers.add(documentMapper);
             } catch (Exception e) {
                 throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
             }
         }

-        return internalMerge(defaultMapper, defaultMappingSource, documentMappers, reason);
+        return internalMerge(defaultMapper, defaultMappingSource, documentMapper, reason);
     }

     static void validateTypeName(String type) {

@@ -361,13 +353,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     }

     private synchronized Map<String, DocumentMapper> internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource,
-                                                                   List<DocumentMapper> documentMappers, MergeReason reason) {
+                                                                   DocumentMapper mapper, MergeReason reason) {
         boolean hasNested = this.hasNested;
         Map<String, ObjectMapper> fullPathObjectMappers = this.fullPathObjectMappers;
         FieldTypeLookup fieldTypes = this.fieldTypes;
-        Map<String, DocumentMapper> mappers = new HashMap<>(this.mappers);

-        Map<String, DocumentMapper> results = new LinkedHashMap<>(documentMappers.size() + 1);
+        Map<String, DocumentMapper> results = new LinkedHashMap<>(2);

         if (defaultMapper != null) {
             if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) {

@@ -378,27 +369,23 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
                     "cannot have more than one type");
             }
             assert defaultMapper.type().equals(DEFAULT_MAPPING);
-            mappers.put(DEFAULT_MAPPING, defaultMapper);
             results.put(DEFAULT_MAPPING, defaultMapper);
         }

-        {
-            Set<String> actualTypes = new HashSet<>(mappers.keySet());
-            documentMappers.forEach(mapper -> actualTypes.add(mapper.type()));
-            actualTypes.remove(DEFAULT_MAPPING);
-            if (actualTypes.size() > 1) {
-                throw new IllegalArgumentException(
-                    "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + actualTypes);
-            }
+        if (mapper != null && this.mapper != null && Objects.equals(this.mapper.type(), mapper.type()) == false) {
+            throw new IllegalArgumentException(
+                "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + Arrays.asList(this.mapper.type(), mapper.type()));
         }

-        for (DocumentMapper mapper : documentMappers) {
+        DocumentMapper newMapper = null;
+        if (mapper != null) {
             // check naming
             validateTypeName(mapper.type());

             // compute the merged DocumentMapper
-            DocumentMapper oldMapper = mappers.get(mapper.type());
-            DocumentMapper newMapper;
+            DocumentMapper oldMapper = this.mapper;
             if (oldMapper != null) {
                 newMapper = oldMapper.merge(mapper.mapping());
             } else {

@@ -442,7 +429,6 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             }

             results.put(newMapper.type(), newMapper);
-            mappers.put(newMapper.type(), newMapper);
         }

         if (reason == MergeReason.MAPPING_UPDATE) {

@@ -456,24 +442,16 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         }
         checkIndexSortCompatibility(indexSettings.getIndexSortConfig(), hasNested);

-        for (Map.Entry<String, DocumentMapper> entry : mappers.entrySet()) {
-            if (entry.getKey().equals(DEFAULT_MAPPING)) {
-                continue;
-            }
-            DocumentMapper documentMapper = entry.getValue();
+        if (newMapper != null) {
             // apply changes to the field types back
-            DocumentMapper updatedDocumentMapper = documentMapper.updateFieldType(fieldTypes.fullNameToFieldType);
-            if (updatedDocumentMapper != documentMapper) {
-                // update both mappers and result
-                entry.setValue(updatedDocumentMapper);
-                if (results.containsKey(updatedDocumentMapper.type())) {
-                    results.put(updatedDocumentMapper.type(), updatedDocumentMapper);
-                }
+            DocumentMapper updatedDocumentMapper = newMapper.updateFieldType(fieldTypes.fullNameToFieldType);
+            if (updatedDocumentMapper != newMapper) {
+                newMapper = updatedDocumentMapper;
+                results.put(updatedDocumentMapper.type(), updatedDocumentMapper);
             }
         }

         // make structures immutable
-        mappers = Collections.unmodifiableMap(mappers);
         results = Collections.unmodifiableMap(results);

         // only need to immutably rewrap these if the previous reference was changed.

@@ -486,7 +464,10 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         if (defaultMappingSource != null) {
             this.defaultMappingSource = defaultMappingSource;
         }
-        this.mappers = mappers;
+        if (newMapper != null) {
+            this.mapper = newMapper;
+        }
+        this.defaultMapper = defaultMapper;
         this.fieldTypes = fieldTypes;
         this.hasNested = hasNested;
         this.fullPathObjectMappers = fullPathObjectMappers;

@@ -498,7 +479,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     }

     private boolean assertMappersShareSameFieldType() {
-        for (DocumentMapper mapper : docMappers(false)) {
+        if (mapper != null) {
             List<FieldMapper> fieldMappers = new ArrayList<>();
             Collections.addAll(fieldMappers, mapper.mapping().metadataMappers);
             MapperUtils.collect(mapper.root(), new ArrayList<>(), fieldMappers);

@@ -692,18 +673,20 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         return documentParser.parse(mappingType, mappingSource, applyDefault ? defaultMappingSource : null);
     }

-    public boolean hasMapping(String mappingType) {
-        return mappers.containsKey(mappingType);
+    /**
+     * Get the set of types.
+     * @deprecated Indices may have one type at most, use {@link #documentMapper()} instead.
+     */
+    @Deprecated
+    public Set<String> types() {
+        return mapper == null ? Collections.emptySet() : Collections.singleton(mapper.type());
     }

     /**
-     * Return the set of concrete types that have a mapping.
-     * NOTE: this does not return the default mapping.
+     * Return the document mapper, or {@code null} if no mapping has been put yet.
     */
-    public Collection<String> types() {
-        final Set<String> types = new HashSet<>(mappers.keySet());
-        types.remove(DEFAULT_MAPPING);
-        return Collections.unmodifiableSet(types);
+    public DocumentMapper documentMapper() {
+        return mapper;
     }

     /**

@@ -712,7 +695,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
      * the default mapping.
      */
     public DocumentMapper documentMapper(String type) {
-        return mappers.get(type);
+        if (mapper != null && type.equals(mapper.type())) {
+            return mapper;
+        }
+        if (DEFAULT_MAPPING.equals(type)) {
+            return defaultMapper;
+        }
+        return null;
     }

     /**

@@ -720,7 +709,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
      * type has been dynamically created.
      */
     public DocumentMapperForType documentMapperWithAutoCreate(String type) {
-        DocumentMapper mapper = mappers.get(type);
+        DocumentMapper mapper = documentMapper(type);
         if (mapper != null) {
             return new DocumentMapperForType(mapper, null);
         }

@@ -836,7 +825,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {

     /** Return a term that uniquely identifies the document, or {@code null} if the type is not allowed. */
     public Term createUidTerm(String type, String id) {
-        if (hasMapping(type) == false) {
+        if (mapper == null || mapper.type().equals(type) == false) {
             return null;
         }
         return new Term(IdFieldMapper.NAME, Uid.encodeId(id));

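Note: the net effect of the MapperService rewrite is that the per-index map of mappers collapses to a single nullable DocumentMapper plus the _default_ mapping. A hedged sketch of the caller-side idiom the rest of this change set adopts:

    DocumentMapper mapper = mapperService.documentMapper();   // may be null before any mapping is put
    if (mapper != null) {
        String type = mapper.type();                          // the one concrete type of the index
    }
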
@@ -89,7 +89,8 @@ public final class QueryParserHelper {
      * @param field The field name to search.
      */
     public static FieldMapper getFieldMapper(MapperService mapperService, String field) {
-        for (DocumentMapper mapper : mapperService.docMappers(true)) {
+        DocumentMapper mapper = mapperService.documentMapper();
+        if (mapper != null) {
             FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper(field);
             if (fieldMapper != null) {
                 return fieldMapper;

@@ -131,7 +131,7 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction<

     private void maybeSyncTranslog(final IndexShard indexShard) throws IOException {
         if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST &&
-                indexShard.getTranslog().getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
+                indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
             indexShard.sync();
         }
     }

@@ -933,7 +933,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     public TranslogStats translogStats() {
-        return getEngine().getTranslog().stats();
+        return getEngine().getTranslogStats();
     }

     public CompletionStats completionStats(String... fields) {

@@ -1330,7 +1330,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     protected void onNewEngine(Engine newEngine) {
-        refreshListeners.setTranslog(newEngine.getTranslog());
+        refreshListeners.setCurrentRefreshLocationSupplier(newEngine::getTranslogLastWriteLocation);
     }

     /**

@@ -1562,8 +1562,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         final Engine engine = getEngineOrNull();
         if (engine != null) {
             try {
-                final Translog translog = engine.getTranslog();
-                return translog.shouldRollGeneration();
+                return engine.shouldRollTranslogGeneration();
             } catch (final AlreadyClosedException e) {
                 // we are already closed, no need to flush or roll
             }

@@ -1578,9 +1577,30 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         }
     }

+    /**
+     * Acquires a lock on the translog files, preventing them from being trimmed.
+     */
     public Closeable acquireTranslogRetentionLock() {
-        Engine engine = getEngine();
-        return engine.getTranslog().acquireRetentionLock();
+        return getEngine().acquireTranslogRetentionLock();
     }

+    /**
+     * Creates a new translog snapshot for reading translog operations whose seq# at least the provided seq#.
+     * The caller has to close the returned snapshot after finishing the reading.
+     */
+    public Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException {
+        return newTranslogSnapshotBetween(minSeqNo, Long.MAX_VALUE);
+    }
+
+    public Translog.Snapshot newTranslogSnapshotBetween(long minSeqNo, long maxSeqNo) throws IOException {
+        return getEngine().newTranslogSnapshotBetween(minSeqNo, maxSeqNo);
+    }
+
+    /**
+     * Returns the estimated number of operations in translog whose seq# at least the provided seq#.
+     */
+    public int estimateTranslogOperationsFromMinSeq(long minSeqNo) {
+        return getEngine().estimateTranslogOperationsFromMinSeq(minSeqNo);
+    }
+
     public List<Segment> segments(boolean verbose) {

@@ -1591,10 +1611,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         getEngine().flushAndClose();
     }

-    public Translog getTranslog() {
-        return getEngine().getTranslog();
-    }
-
     public String getHistoryUUID() {
         return getEngine().getHistoryUUID();
     }

@@ -1732,6 +1748,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         return replicationTracker.getGlobalCheckpoint();
     }

+    /**
+     * Returns the latest global checkpoint value that has been persisted in the underlying storage (i.e. translog's checkpoint)
+     */
+    public long getLastSyncedGlobalCheckpoint() {
+        return getEngine().getLastSyncedGlobalCheckpoint();
+    }
+
     /**
      * Get the local knowledge of the global checkpoints for all in-sync allocation IDs.
     *

@@ -2307,6 +2330,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         getEngine().syncTranslog();
     }

+    /**
+     * Checks if the underlying storage sync is required.
+     */
+    public boolean isSyncNeeded() {
+        return getEngine().isTranslogSyncNeeded();
+    }
+
     /**
      * Returns the current translog durability mode
      */

@@ -2466,7 +2496,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     private void setRefreshPending(Engine engine) {
-        Translog.Location lastWriteLocation = engine.getTranslog().getLastWriteLocation();
+        Translog.Location lastWriteLocation = engine.getTranslogLastWriteLocation();
         Translog.Location location;
         do {
             location = this.pendingRefreshLocation.get();

@@ -83,7 +83,7 @@ public class PrimaryReplicaSyncer extends AbstractComponent {
         ActionListener<ResyncTask> resyncListener = null;
         try {
             final long startingSeqNo = indexShard.getGlobalCheckpoint() + 1;
-            Translog.Snapshot snapshot = indexShard.getTranslog().newSnapshotFrom(startingSeqNo);
+            Translog.Snapshot snapshot = indexShard.newTranslogSnapshotFromMinSeqNo(startingSeqNo);
             resyncListener = new ActionListener<ResyncTask>() {
                 @Override
                 public void onResponse(final ResyncTask resyncTask) {

@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.concurrent.Executor;
 import java.util.function.Consumer;
 import java.util.function.IntSupplier;
+import java.util.function.Supplier;

 import static java.util.Objects.requireNonNull;

@@ -153,21 +154,20 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener,
     /**
      * Setup the translog used to find the last refreshed location.
     */
-    public void setTranslog(Translog translog) {
-        this.translog = translog;
+    public void setCurrentRefreshLocationSupplier(Supplier<Translog.Location> currentRefreshLocationSupplier) {
+        this.currentRefreshLocationSupplier = currentRefreshLocationSupplier;
     }

     // Implementation of ReferenceManager.RefreshListener that adapts Lucene's RefreshListener into Elasticsearch's refresh listeners.
-    private Translog translog;
     /**
      * Snapshot of the translog location before the current refresh if there is a refresh going on or null. Doesn't have to be volatile
      * because when it is used by the refreshing thread.
     */
     private Translog.Location currentRefreshLocation;
+    private Supplier<Translog.Location> currentRefreshLocationSupplier;

     @Override
     public void beforeRefresh() throws IOException {
-        currentRefreshLocation = translog.getLastWriteLocation();
+        currentRefreshLocation = currentRefreshLocationSupplier.get();
     }

     @Override

@@ -145,9 +145,6 @@ public class RecoverySourceHandler {
         }, shardId + " validating recovery target ["+ request.targetAllocationId() + "] registered ");

         try (Closeable ignored = shard.acquireTranslogRetentionLock()) {
-
-            final Translog translog = shard.getTranslog();
-
             final long startingSeqNo;
             final long requiredSeqNoRangeStart;
             final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO &&

@@ -170,7 +167,7 @@ public class RecoverySourceHandler {
                 requiredSeqNoRangeStart =
                     Long.parseLong(phase1Snapshot.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1;
                 try {
-                    phase1(phase1Snapshot.getIndexCommit(), translog::totalOperations);
+                    phase1(phase1Snapshot.getIndexCommit(), () -> shard.estimateTranslogOperationsFromMinSeq(startingSeqNo));
                 } catch (final Exception e) {
                     throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e);
                 } finally {

@@ -187,7 +184,7 @@ public class RecoverySourceHandler {

             try {
                 // For a sequence based recovery, the target can keep its local translog
-                prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, translog.estimateTotalOperationsFromMinSeq(startingSeqNo));
+                prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, shard.estimateTranslogOperationsFromMinSeq(startingSeqNo));
             } catch (final Exception e) {
                 throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e);
             }

@@ -210,9 +207,9 @@ public class RecoverySourceHandler {

             logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo);

-            logger.trace("snapshot translog for recovery; current size is [{}]", translog.estimateTotalOperationsFromMinSeq(startingSeqNo));
+            logger.trace("snapshot translog for recovery; current size is [{}]", shard.estimateTranslogOperationsFromMinSeq(startingSeqNo));
             final long targetLocalCheckpoint;
-            try(Translog.Snapshot snapshot = translog.newSnapshotFrom(startingSeqNo)) {
+            try(Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) {
                 targetLocalCheckpoint = phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, snapshot);
             } catch (Exception e) {
                 throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e);

@@ -261,7 +258,7 @@ public class RecoverySourceHandler {
         // the start recovery request is initialized with the starting sequence number set to the target shard's local checkpoint plus one
         if (startingSeqNo - 1 <= localCheckpoint) {
             final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1);
-            try (Translog.Snapshot snapshot = shard.getTranslog().newSnapshotFrom(startingSeqNo)) {
+            try (Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) {
                 Translog.Operation operation;
                 while ((operation = snapshot.next()) != null) {
                     if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {

@@ -48,7 +48,7 @@ public abstract class ExecutorBuilder<U extends ExecutorBuilder.ExecutorSettings
     }

     protected int applyHardSizeLimit(final Settings settings, final String name) {
-        if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {
+        if (name.equals(ThreadPool.Names.BULK)) {
             return 1 + EsExecutors.numberOfProcessors(settings);
         } else {
             return Integer.MAX_VALUE;

@@ -49,20 +49,7 @@ public final class FixedExecutorBuilder extends ExecutorBuilder<FixedExecutorBui
      * @param queueSize the size of the backing queue, -1 for unbounded
      */
     FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize) {
-        this(settings, name, size, queueSize, false);
-    }
-
-    /**
-     * Construct a fixed executor builder; the settings will have the key prefix "thread_pool." followed by the executor name.
-     *
-     * @param settings the node-level settings
-     * @param name the name of the executor
-     * @param size the fixed number of threads
-     * @param queueSize the size of the backing queue, -1 for unbounded
-     * @param deprecated whether or not the thread pool is deprecated
-     */
-    FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize, final boolean deprecated) {
-        this(settings, name, size, queueSize, "thread_pool." + name, deprecated);
+        this(settings, name, size, queueSize, "thread_pool." + name);
     }

     /**

@@ -75,41 +62,16 @@ public final class FixedExecutorBuilder extends ExecutorBuilder<FixedExecutorBui
      * @param prefix the prefix for the settings keys
      */
     public FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize, final String prefix) {
-        this(settings, name, size, queueSize, prefix, false);
-    }
-
-    /**
-     * Construct a fixed executor builder.
-     *
-     * @param settings the node-level settings
-     * @param name the name of the executor
-     * @param size the fixed number of threads
-     * @param queueSize the size of the backing queue, -1 for unbounded
-     * @param prefix the prefix for the settings keys
-     */
-    private FixedExecutorBuilder(
-            final Settings settings,
-            final String name,
-            final int size,
-            final int queueSize,
-            final String prefix,
-            final boolean deprecated) {
         super(name);
         final String sizeKey = settingsKey(prefix, "size");
-        final Setting.Property[] properties;
-        if (deprecated) {
-            properties = new Setting.Property[]{Setting.Property.NodeScope, Setting.Property.Deprecated};
-        } else {
-            properties = new Setting.Property[]{Setting.Property.NodeScope};
-        }
         this.sizeSetting =
             new Setting<>(
                 sizeKey,
                 s -> Integer.toString(size),
                 s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey),
-                properties);
+                Setting.Property.NodeScope);
         final String queueSizeKey = settingsKey(prefix, "queue_size");
-        this.queueSizeSetting = Setting.intSetting(queueSizeKey, queueSize, properties);
+        this.queueSizeSetting = Setting.intSetting(queueSizeKey, queueSize, Setting.Property.NodeScope);
     }

     @Override

@@ -69,7 +69,6 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
     public static final String LISTENER = "listener";
     public static final String GET = "get";
     public static final String ANALYZE = "analyze";
-    public static final String INDEX = "index";
     public static final String BULK = "bulk";
     public static final String SEARCH = "search";
     public static final String MANAGEMENT = "management";

@@ -126,7 +125,6 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
         map.put(Names.LISTENER, ThreadPoolType.FIXED);
         map.put(Names.GET, ThreadPoolType.FIXED);
         map.put(Names.ANALYZE, ThreadPoolType.FIXED);
-        map.put(Names.INDEX, ThreadPoolType.FIXED);
         map.put(Names.BULK, ThreadPoolType.FIXED);
         map.put(Names.SEARCH, ThreadPoolType.FIXED_AUTO_QUEUE_SIZE);
         map.put(Names.MANAGEMENT, ThreadPoolType.SCALING);

@@ -172,7 +170,6 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
         final int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors);
         final int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512);
         builders.put(Names.GENERIC, new ScalingExecutorBuilder(Names.GENERIC, 4, genericThreadPoolMax, TimeValue.timeValueSeconds(30)));
-        builders.put(Names.INDEX, new FixedExecutorBuilder(settings, Names.INDEX, availableProcessors, 200, true));
         builders.put(Names.BULK, new FixedExecutorBuilder(settings, Names.BULK, availableProcessors, 200)); // now that we reuse bulk for index/delete ops
         builders.put(Names.GET, new FixedExecutorBuilder(settings, Names.GET, availableProcessors, 1000));
         builders.put(Names.ANALYZE, new FixedExecutorBuilder(settings, Names.ANALYZE, 1, 16));

@@ -45,8 +45,8 @@ public class RejectionActionIT extends ESIntegTestCase {
             .put(super.nodeSettings(nodeOrdinal))
             .put("thread_pool.search.size", 1)
             .put("thread_pool.search.queue_size", 1)
-            .put("thread_pool.index.size", 1)
-            .put("thread_pool.index.queue_size", 1)
+            .put("thread_pool.bulk.size", 1)
+            .put("thread_pool.bulk.queue_size", 1)
             .put("thread_pool.get.size", 1)
             .put("thread_pool.get.queue_size", 1)
             .build();

@@ -67,7 +67,6 @@ public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTest
         request.timeout(randomTimeValue());
         request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
         request.setPreserveExisting(randomBoolean());
-        request.flatSettings(randomBoolean());
         return request;
     }

@@ -77,7 +76,6 @@ public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTest
         result.timeout(request.timeout());
         result.indicesOptions(request.indicesOptions());
         result.setPreserveExisting(request.isPreserveExisting());
-        result.flatSettings(request.flatSettings());
         return result;
     }

@@ -124,7 +124,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {
             super(Settings.EMPTY, IndexAction.NAME, TransportBulkActionIngestTests.this.transportService,
                     TransportBulkActionIngestTests.this.clusterService,
                     null, null, null, new ActionFilters(Collections.emptySet()), null,
-                    IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX, bulkAction, null);
+                    IndexRequest::new, IndexRequest::new, ThreadPool.Names.BULK, bulkAction, null);
         }

         @Override

@@ -433,7 +433,7 @@ public class IndexCreationTaskTests extends ESTestCase {

         when(docMapper.routingFieldMapper()).thenReturn(routingMapper);

-        when(mapper.docMappers(anyBoolean())).thenReturn(Collections.singletonList(docMapper));
+        when(mapper.documentMapper()).thenReturn(docMapper);

         final Index index = new Index("target", "tgt1234");
         final Supplier<Sort> supplier = mock(Supplier.class);

@@ -250,7 +250,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
IndexShard shard = indexService.getShard(0);
assertBusy(() -> {
assertFalse(shard.getTranslog().syncNeeded());
assertFalse(shard.isSyncNeeded());
});
}

@@ -275,7 +275,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
assertNotNull(indexService.getFsyncTask());
final IndexShard shard = indexService.getShard(0);
assertBusy(() -> assertFalse(shard.getTranslog().syncNeeded()));
assertBusy(() -> assertFalse(shard.isSyncNeeded()));

client()
.admin()

@@ -311,7 +311,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
indexService.updateMetaData(metaData);

IndexShard shard = indexService.getShard(0);
assertBusy(() -> assertThat(shard.getTranslog().totalOperations(), equalTo(0)));
assertBusy(() -> assertThat(shard.estimateTranslogOperationsFromMinSeq(0L), equalTo(0)));
}

public void testIllegalFsyncInterval() {

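These call sites show the "enforce translog access via engine" theme of this merge: the tests now ask the IndexShard a question (isSyncNeeded, estimateTranslogOperationsFromMinSeq) instead of pulling the Translog out of the shard. A rough sketch of the delegation the new facade implies, built only from calls visible elsewhere in this diff; the real method also has to cope with a closed or missing engine:

    // Hypothetical simplification of IndexShard#isSyncNeeded.
    public boolean isSyncNeeded() {
        return getEngine().getTranslog().syncNeeded();
    }
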
@@ -52,7 +52,6 @@ public class BinaryDVFieldDataTests extends AbstractFieldDataTestCase {

final DocumentMapper mapper = mapperService.documentMapperParser().parse("test", new CompressedXContent(mapping));


List<BytesRef> bytesList1 = new ArrayList<>(2);
bytesList1.add(randomBytes());
bytesList1.add(randomBytes());

@@ -123,22 +122,26 @@ public class BinaryDVFieldDataTests extends AbstractFieldDataTestCase {
// Test whether ScriptDocValues.BytesRefs makes a deepcopy
fieldData = indexFieldData.load(reader);
ScriptDocValues<?> scriptValues = fieldData.getScriptValues();
scriptValues.setNextDocId(0);
assertEquals(2, scriptValues.size());
assertEquals(bytesList1.get(0), scriptValues.get(0));
assertEquals(bytesList1.get(1), scriptValues.get(1));
Object[][] retValues = new BytesRef[4][0];
for (int i = 0; i < 4; i++) {
scriptValues.setNextDocId(i);
retValues[i] = new BytesRef[scriptValues.size()];
for (int j = 0; j < retValues[i].length; j++) {
retValues[i][j] = scriptValues.get(j);
}
}
assertEquals(2, retValues[0].length);
assertEquals(bytesList1.get(0), retValues[0][0]);
assertEquals(bytesList1.get(1), retValues[0][1]);

scriptValues.setNextDocId(1);
assertEquals(1, scriptValues.size());
assertEquals(bytes1, scriptValues.get(0));
assertEquals(1, retValues[1].length);
assertEquals(bytes1, retValues[1][0]);

scriptValues.setNextDocId(2);
assertEquals(0, scriptValues.size());
assertEquals(0, retValues[2].length);

scriptValues.setNextDocId(3);
assertEquals(2, scriptValues.size());
assertEquals(bytesList2.get(0), scriptValues.get(0));
assertEquals(bytesList2.get(1), scriptValues.get(1));
assertEquals(2, retValues[3].length);
assertEquals(bytesList2.get(0), retValues[3][0]);
assertEquals(bytesList2.get(1), retValues[3][1]);
}

private static BytesRef randomBytes() {

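The rewritten assertions read each document's values into a separate array before comparing, because ScriptDocValues may reuse the underlying BytesRef across setNextDocId calls. A minimal sketch of the safe pattern when a value must outlive the current document, using Lucene's standard copy helper:

    // Copy the value out if it must survive the next setNextDocId call;
    // the doc-values reader is otherwise free to recycle the returned bytes.
    BytesRef kept = BytesRef.deepCopyOf((BytesRef) scriptValues.get(0));
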
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.index.mapper;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.Priority;

@@ -41,10 +42,12 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDI
import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE;
import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.NULL_VALUE;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;

public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {

@@ -349,4 +352,50 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
);
assertThat(e.getMessage(), containsString("name cannot be empty string"));
}

public void testNullValue() throws Exception {
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("location")
.field("type", "geo_point")
.field(NULL_VALUE, "1,2")
.endObject().endObject()
.endObject().endObject());

DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
.parse("type", new CompressedXContent(mapping));
FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
assertThat(fieldMapper, instanceOf(GeoPointFieldMapper.class));

Object nullValue = fieldMapper.fieldType().nullValue();
assertThat(nullValue, equalTo(new GeoPoint(1, 2)));

ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference
.bytes(XContentFactory.jsonBuilder()
.startObject()
.nullField("location")
.endObject()),
XContentType.JSON));

assertThat(doc.rootDoc().getField("location"), notNullValue());
BytesRef defaultValue = doc.rootDoc().getField("location").binaryValue();

doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference
.bytes(XContentFactory.jsonBuilder()
.startObject()
.field("location", "1, 2")
.endObject()),
XContentType.JSON));
// Shouldn't matter if we specify the value explicitly or use null value
assertThat(defaultValue, equalTo(doc.rootDoc().getField("location").binaryValue()));

doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference
.bytes(XContentFactory.jsonBuilder()
.startObject()
.field("location", "3, 4")
.endObject()),
XContentType.JSON));
// Shouldn't be equal to the default value
assertThat(defaultValue, not(equalTo(doc.rootDoc().getField("location").binaryValue())));
}

}

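For context, null_value (#29451) maps an explicit null to a default geo_point at index time. A sketch of such a mapping built in the same XContentFactory style as the test (field name is illustrative):

    // Documents indexed with {"location": null} are treated as if they held "1,2".
    XContentBuilder mapping = XContentFactory.jsonBuilder().startObject()
        .startObject("properties").startObject("location")
            .field("type", "geo_point")
            .field("null_value", "1,2")
        .endObject().endObject()
    .endObject();
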
@@ -119,7 +119,7 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
} else {
throw e;
}
assertFalse(indexService.mapperService().hasMapping(MapperService.DEFAULT_MAPPING));
assertNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING));
}

public void testTotalFieldsExceedsLimit() throws Throwable {

@@ -33,7 +33,7 @@ import static org.hamcrest.Matchers.equalTo;
public class NullValueTests extends ESSingleNodeTestCase {
public void testNullNullValue() throws Exception {
IndexService indexService = createIndex("test", Settings.builder().build());
String[] typesToTest = {"integer", "long", "double", "float", "short", "date", "ip", "keyword", "boolean", "byte"};
String[] typesToTest = {"integer", "long", "double", "float", "short", "date", "ip", "keyword", "boolean", "byte", "geo_point"};

for (String type : typesToTest) {
String mapping = Strings.toString(XContentFactory.jsonBuilder()

@@ -543,7 +543,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
listener.onFailure(e);
}
},
ThreadPool.Names.INDEX, request);
ThreadPool.Names.BULK, request);
}

@Override

@@ -682,7 +682,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
@Override
protected PrimaryResult performOnPrimary(
final IndexShard primary, final GlobalCheckpointSyncAction.Request request) throws Exception {
primary.getTranslog().sync();
primary.sync();
return new PrimaryResult(request, new ReplicationResponse());
}

@@ -330,7 +330,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase

final Translog.Operation op1;
final List<Translog.Operation> initOperations = new ArrayList<>(initDocs);
try (Translog.Snapshot snapshot = replica2.getTranslog().newSnapshot()) {
try (Translog.Snapshot snapshot = getTranslog(replica2).newSnapshot()) {
assertThat(snapshot.totalOperations(), equalTo(initDocs + 1));
for (int i = 0; i < initDocs; i++) {
Translog.Operation op = snapshot.next();

@@ -347,7 +347,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
shards.promoteReplicaToPrimary(replica1).get(); // wait until resync completed.
shards.index(new IndexRequest(index.getName(), "type", "d2").source("{}", XContentType.JSON));
final Translog.Operation op2;
try (Translog.Snapshot snapshot = replica2.getTranslog().newSnapshot()) {
try (Translog.Snapshot snapshot = getTranslog(replica2).newSnapshot()) {
assertThat(snapshot.totalOperations(), equalTo(initDocs + 2));
op2 = snapshot.next();
assertThat(op2.seqNo(), equalTo(op1.seqNo()));

@@ -362,7 +362,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
shards.promoteReplicaToPrimary(replica2);
logger.info("--> Recover replica3 from replica2");
recoverReplica(replica3, replica2);
try (Translog.Snapshot snapshot = replica3.getTranslog().newSnapshot()) {
try (Translog.Snapshot snapshot = getTranslog(replica3).newSnapshot()) {
assertThat(snapshot.totalOperations(), equalTo(initDocs + 1));
assertThat(snapshot.next(), equalTo(op2));
assertThat("Remaining of snapshot should contain init operations", snapshot, containsOperationsInAnyOrder(initOperations));

@@ -468,7 +468,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
long expectedPrimaryTerm,
String failureMessage) throws IOException {
for (IndexShard indexShard : replicationGroup) {
try(Translog.Snapshot snapshot = indexShard.getTranslog().newSnapshot()) {
try(Translog.Snapshot snapshot = getTranslog(indexShard).newSnapshot()) {
assertThat(snapshot.totalOperations(), equalTo(expectedOperation));
long expectedSeqNo = 0L;
Translog.Operation op = snapshot.next();

@@ -129,7 +129,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
shards.flush();
translogTrimmed = randomBoolean();
if (translogTrimmed) {
final Translog translog = shards.getPrimary().getTranslog();
final Translog translog = getTranslog(shards.getPrimary());
translog.getDeletionPolicy().setRetentionAgeInMillis(0);
translog.trimUnreferencedReaders();
}

@@ -272,7 +272,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
// otherwise the deletion policy won't trim translog
assertBusy(() -> {
shards.syncGlobalCheckpoint();
assertThat(newPrimary.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo()));
assertThat(newPrimary.getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo()));
});
newPrimary.flush(new FlushRequest());
uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10));

@@ -341,7 +341,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
// Index more docs - move the global checkpoint >= seqno of the stale operations.
goodDocs += shards.indexDocs(scaledRandomIntBetween(staleDocs, staleDocs * 5));
shards.syncGlobalCheckpoint();
assertThat(replica.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(replica.seqNoStats().getMaxSeqNo()));
assertThat(replica.getLastSyncedGlobalCheckpoint(), equalTo(replica.seqNoStats().getMaxSeqNo()));
// Recover a replica again should also rollback the stale documents.
shards.removeReplica(replica);
replica.close("recover replica - second time", false);

@@ -76,14 +76,26 @@ public class GeoPointParsingTests extends ESTestCase {
GeoPoint point = GeoUtils.parseGeoPoint(objectLatLon(randomPt.lat(), randomPt.lon()));
assertPointsEqual(point, randomPt);

GeoUtils.parseGeoPoint(toObject(objectLatLon(randomPt.lat(), randomPt.lon())), randomBoolean());
assertPointsEqual(point, randomPt);

GeoUtils.parseGeoPoint(arrayLatLon(randomPt.lat(), randomPt.lon()), point);
assertPointsEqual(point, randomPt);

GeoUtils.parseGeoPoint(toObject(arrayLatLon(randomPt.lat(), randomPt.lon())), randomBoolean());
assertPointsEqual(point, randomPt);

GeoUtils.parseGeoPoint(geohash(randomPt.lat(), randomPt.lon()), point);
assertCloseTo(point, randomPt.lat(), randomPt.lon());

GeoUtils.parseGeoPoint(toObject(geohash(randomPt.lat(), randomPt.lon())), randomBoolean());
assertCloseTo(point, randomPt.lat(), randomPt.lon());

GeoUtils.parseGeoPoint(stringLatLon(randomPt.lat(), randomPt.lon()), point);
assertCloseTo(point, randomPt.lat(), randomPt.lon());

GeoUtils.parseGeoPoint(toObject(stringLatLon(randomPt.lat(), randomPt.lon())), randomBoolean());
assertCloseTo(point, randomPt.lat(), randomPt.lon());
}

// Based on #5390

@@ -99,6 +111,12 @@
parser.nextToken();
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));

XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
parser2.nextToken();
e = expectThrows(ElasticsearchParseException.class, () ->
GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean()));
assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
}

public void testInvalidPointLatHashMix() throws IOException {

@@ -109,9 +127,14 @@

XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
parser.nextToken();

Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));

XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
parser2.nextToken();
e = expectThrows(ElasticsearchParseException.class, () ->
GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean()));
assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));
}

public void testInvalidPointLonHashMix() throws IOException {

@@ -125,6 +148,12 @@

Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));

XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
parser2.nextToken();
e = expectThrows(ElasticsearchParseException.class, () ->
GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean()));
assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));
}

public void testInvalidField() throws IOException {

@@ -135,9 +164,15 @@

XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
parser.nextToken();

Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));


XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content));
parser2.nextToken();
e = expectThrows(ElasticsearchParseException.class, () ->
GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean()));
assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
}

private XContentParser objectLatLon(double lat, double lon) throws IOException {

@@ -183,4 +218,22 @@
assertEquals(point.lat(), lat, TOLERANCE);
assertEquals(point.lon(), lon, TOLERANCE);
}

public static Object toObject(XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.VALUE_NULL) {
return null;
} else if (token == XContentParser.Token.VALUE_STRING) {
return parser.text();
} else if (token == XContentParser.Token.VALUE_NUMBER) {
return parser.numberValue();
} else if (token == XContentParser.Token.START_OBJECT) {
return parser.map();
} else if (token == XContentParser.Token.START_ARRAY) {
return parser.list();
} else {
fail("Unexpected token " + token);
}
return null;
}
}

@@ -90,9 +90,6 @@ public class GlobalCheckpointSyncActionTests extends ESTestCase {
final Translog.Durability durability = randomFrom(Translog.Durability.ASYNC, Translog.Durability.REQUEST);
when(indexShard.getTranslogDurability()).thenReturn(durability);

final Translog translog = mock(Translog.class);
when(indexShard.getTranslog()).thenReturn(translog);

final long globalCheckpoint = randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Integer.MAX_VALUE);
final long lastSyncedGlobalCheckpoint;
if (randomBoolean() && globalCheckpoint != SequenceNumbers.NO_OPS_PERFORMED) {

@@ -104,7 +101,7 @@ public class GlobalCheckpointSyncActionTests extends ESTestCase {
}

when(indexShard.getGlobalCheckpoint()).thenReturn(globalCheckpoint);
when(translog.getLastSyncedGlobalCheckpoint()).thenReturn(lastSyncedGlobalCheckpoint);
when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(lastSyncedGlobalCheckpoint);

final GlobalCheckpointSyncAction action = new GlobalCheckpointSyncAction(
Settings.EMPTY,

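The mocks above no longer hand out a Translog at all: the sync action now reads the last synced global checkpoint from the shard and asks the shard itself to sync. A sketch of the calling pattern this implies, simplified from the test's setup (the real action also handles the ASYNC durability case):

    // Sync through the shard facade instead of reaching into its translog.
    if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST
            && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
        indexShard.sync();
    }
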
@@ -107,6 +107,7 @@ import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog;

public class IndexShardIT extends ESSingleNodeTestCase {

@@ -167,7 +168,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
Translog translog = ShardUtilsTests.getShardEngine(shard).getTranslog();
Translog translog = getTranslog(shard);
Predicate<Translog> needsSync = (tlog) -> {
// we can't use tlog.needsSync() here since it also takes the global checkpoint into account
// we explicitly want to check here if our durability checks are taken into account so we only

@@ -343,7 +344,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON),
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {});
assertTrue(shard.shouldPeriodicallyFlush());
final Translog translog = shard.getEngine().getTranslog();
final Translog translog = getTranslog(shard);
assertEquals(2, translog.stats().getUncommittedOperations());
client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON)
.setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();

@@ -384,7 +385,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
final IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0);
int rolls = 0;
final Translog translog = shard.getEngine().getTranslog();
final Translog translog = getTranslog(shard);
final long generation = translog.currentFileGeneration();
final int numberOfDocuments = randomIntBetween(32, 128);
for (int i = 0; i < numberOfDocuments; i++) {

@@ -454,11 +455,11 @@ public class IndexShardIT extends ESSingleNodeTestCase {
assertThat(shard.flushStats().getPeriodic(), equalTo(periodic + 1));
};
} else {
final long generation = shard.getEngine().getTranslog().currentFileGeneration();
final long generation = getTranslog(shard).currentFileGeneration();
client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
check = () -> assertEquals(
generation + 1,
shard.getEngine().getTranslog().currentFileGeneration());
getTranslog(shard).currentFileGeneration());
}
assertBusy(check);
running.set(false);

@@ -72,7 +72,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineException;

@@ -286,14 +285,14 @@ public class IndexShardTests extends IndexShardTestCase {
closeShards(indexShard);
assertThat(indexShard.getActiveOperationsCount(), equalTo(0));
try {
indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX, "");
indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.BULK, "");
fail("we should not be able to increment anymore");
} catch (IndexShardClosedException e) {
// expected
}
try {
indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, null,
ThreadPool.Names.INDEX, "");
ThreadPool.Names.BULK, "");
fail("we should not be able to increment anymore");
} catch (IndexShardClosedException e) {
// expected

@@ -304,7 +303,7 @@ public class IndexShardTests extends IndexShardTestCase {
IndexShard indexShard = newShard(false);
expectThrows(IndexShardNotStartedException.class, () ->
indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm() + randomIntBetween(1, 100),
SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.INDEX, ""));
SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.BULK, ""));
closeShards(indexShard);
}

@@ -344,7 +343,7 @@ public class IndexShardTests extends IndexShardTestCase {
throw new RuntimeException(e);
}
},
ThreadPool.Names.INDEX, id);
ThreadPool.Names.BULK, id);
});
thread.start();
threads.add(thread);

@@ -395,7 +394,7 @@ public class IndexShardTests extends IndexShardTestCase {
throw new RuntimeException(e);
}
},
ThreadPool.Names.INDEX, id);
ThreadPool.Names.BULK, id);
});
thread.start();
delayedThreads.add(thread);

@@ -519,7 +518,7 @@ public class IndexShardTests extends IndexShardTestCase {
public void testPrimaryPromotionRollsGeneration() throws Exception {
final IndexShard indexShard = newStartedShard(false);

final long currentTranslogGeneration = indexShard.getTranslog().getGeneration().translogFileGeneration;
final long currentTranslogGeneration = getTranslog(indexShard).getGeneration().translogFileGeneration;

// promote the replica
final ShardRouting replicaRouting = indexShard.routingEntry();

@@ -557,8 +556,8 @@ public class IndexShardTests extends IndexShardTestCase {
ThreadPool.Names.GENERIC, "");

latch.await();
assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1));
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
assertThat(getTranslog(indexShard).getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1));
assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));

closeShards(indexShard);
}

@@ -579,7 +578,7 @@ public class IndexShardTests extends IndexShardTestCase {
true, ShardRoutingState.STARTED, replicaRouting.allocationId());
final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 1000);
indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
}, 0L,
Collections.singleton(indexShard.routingEntry().allocationId().getId()),
new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(),

@@ -591,7 +590,7 @@ public class IndexShardTests extends IndexShardTestCase {
assertEquals(0, indexShard.getActiveOperationsCount());
if (indexShard.routingEntry().isRelocationTarget() == false) {
try {
indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.INDEX, "");
indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.BULK, "");
fail("shard shouldn't accept operations as replica");
} catch (IllegalStateException ignored) {

@@ -610,14 +609,14 @@ public class IndexShardTests extends IndexShardTestCase {

private Releasable acquirePrimaryOperationPermitBlockingly(IndexShard indexShard) throws ExecutionException, InterruptedException {
PlainActionFuture<Releasable> fut = new PlainActionFuture<>();
indexShard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX, "");
indexShard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.BULK, "");
return fut.get();
}

private Releasable acquireReplicaOperationPermitBlockingly(IndexShard indexShard, long opPrimaryTerm)
throws ExecutionException, InterruptedException {
PlainActionFuture<Releasable> fut = new PlainActionFuture<>();
indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), fut, ThreadPool.Names.INDEX, "");
indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), fut, ThreadPool.Names.BULK, "");
return fut.get();
}

@@ -665,12 +664,12 @@ public class IndexShardTests extends IndexShardTestCase {
if (shardRouting.primary() == false) {
final IllegalStateException e =
expectThrows(IllegalStateException.class,
() -> indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX, ""));
() -> indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.BULK, ""));
assertThat(e, hasToString(containsString("shard " + shardRouting + " is not a primary")));
}

final long primaryTerm = indexShard.getPrimaryTerm();
final long translogGen = engineClosed ? -1 : indexShard.getTranslog().getGeneration().translogFileGeneration;
final long translogGen = engineClosed ? -1 : getTranslog(indexShard).getGeneration().translogFileGeneration;

final Releasable operation1;
final Releasable operation2;

@@ -702,7 +701,7 @@ public class IndexShardTests extends IndexShardTestCase {
};

indexShard.acquireReplicaOperationPermit(primaryTerm - 1, SequenceNumbers.UNASSIGNED_SEQ_NO, onLockAcquired,
ThreadPool.Names.INDEX, "");
ThreadPool.Names.BULK, "");

assertFalse(onResponse.get());
assertTrue(onFailure.get());

@@ -748,7 +747,7 @@ public class IndexShardTests extends IndexShardTestCase {
@Override
public void onResponse(Releasable releasable) {
assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint));
assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint));
onResponse.set(true);

@@ -794,25 +793,25 @@ public class IndexShardTests extends IndexShardTestCase {
assertFalse(onResponse.get());
assertNull(onFailure.get());
assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm));
assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(primaryTerm));
Releasables.close(operation1);
// our operation should still be blocked
assertFalse(onResponse.get());
assertNull(onFailure.get());
assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm));
assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(primaryTerm));
Releasables.close(operation2);
barrier.await();
// now lock acquisition should have succeeded
assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
if (engineClosed) {
assertFalse(onResponse.get());
assertThat(onFailure.get(), instanceOf(AlreadyClosedException.class));
} else {
assertTrue(onResponse.get());
assertNull(onFailure.get());
assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(translogGen + 1));
assertThat(getTranslog(indexShard).getGeneration().translogFileGeneration, equalTo(translogGen + 1));
assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint));
assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint));
}

@@ -1023,7 +1022,7 @@ public class IndexShardTests extends IndexShardTestCase {
latch.countDown();
}
},
ThreadPool.Names.INDEX, "");
ThreadPool.Names.BULK, "");
};

final long firstIncrement = 1 + (randomBoolean() ? 0 : 1);

@@ -1384,7 +1383,7 @@ public class IndexShardTests extends IndexShardTestCase {
super.onResponse(releasable);
}
};
shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.INDEX, "i_" + i);
shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.BULK, "i_" + i);
onLockAcquiredActions.add(onLockAcquired);
}

@@ -1649,7 +1648,7 @@ public class IndexShardTests extends IndexShardTestCase {
assertEquals(1, newShard.recoveryState().getTranslog().totalOperations());
assertEquals(1, newShard.recoveryState().getTranslog().totalOperationsOnStart());
assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f);
try (Translog.Snapshot snapshot = newShard.getTranslog().newSnapshot()) {
try (Translog.Snapshot snapshot = getTranslog(newShard).newSnapshot()) {
Translog.Operation operation;
int numNoops = 0;
while ((operation = snapshot.next()) != null) {

@@ -2050,7 +2049,7 @@ public class IndexShardTests extends IndexShardTestCase {
@Override
public long indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) throws IOException {
final long localCheckpoint = super.indexTranslogOperations(operations, totalTranslogOps);
assertFalse(replica.getTranslog().syncNeeded());
assertFalse(replica.isSyncNeeded());
return localCheckpoint;
}
}, true);

@@ -133,7 +133,7 @@ public class RefreshListenersTests extends ESTestCase {
(e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm);
engine = new InternalEngine(config);
engine.recoverFromTranslog();
listeners.setTranslog(engine.getTranslog());
listeners.setCurrentRefreshLocationSupplier(engine::getTranslogLastWriteLocation);
}

@After

@@ -148,7 +148,7 @@ public class ClusterStateChanges extends AbstractComponent {
when(indexService.index()).thenReturn(indexMetaData.getIndex());
MapperService mapperService = mock(MapperService.class);
when(indexService.mapperService()).thenReturn(mapperService);
when(mapperService.docMappers(anyBoolean())).thenReturn(Collections.emptyList());
when(mapperService.documentMapper()).thenReturn(null);
when(indexService.getIndexEventListener()).thenReturn(new IndexEventListener() {});
when(indexService.getIndexSortSupplier()).thenReturn(() -> null);
return indexService;

@@ -113,7 +113,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId();
PlainActionFuture<Releasable> fut = new PlainActionFuture<>();
shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX, "");
shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.BULK, "");
try (Releasable operationLock = fut.get()) {
SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
flushService.attemptSyncedFlush(shardId, listener);

@@ -59,7 +59,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
}
flushShard(replica);
replica.updateGlobalCheckpointOnReplica(initDocs - 1, "test");
replica.getTranslog().sync();
replica.sync();
final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null);
assertThat(PeerRecoveryTargetService.getStartingSeqNo(logger, recoveryTarget), equalTo(initDocs));
recoveryTarget.decRef();

@@ -81,7 +81,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
// Advances the global checkpoint, a safe commit also advances
{
replica.updateGlobalCheckpointOnReplica(initDocs + moreDocs - 1, "test");
replica.getTranslog().sync();
replica.sync();
final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null);
assertThat(PeerRecoveryTargetService.getStartingSeqNo(logger, recoveryTarget), equalTo(initDocs + moreDocs));
recoveryTarget.decRef();

@@ -61,7 +61,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
try (ReplicationGroup shards = createGroup(0)) {
shards.startPrimary();
int docs = shards.indexDocs(10);
shards.getPrimary().getTranslog().rollGeneration();
getTranslog(shards.getPrimary()).rollGeneration();
shards.flush();
if (randomBoolean()) {
docs += shards.indexDocs(10);

@@ -69,7 +69,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
shards.addReplica();
shards.startAll();
final IndexShard replica = shards.getReplicas().get(0);
assertThat(replica.getTranslog().totalOperations(), equalTo(docs));
assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(docs));
}
}

@@ -77,7 +77,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
try (ReplicationGroup shards = createGroup(0)) {
shards.startPrimary();
shards.indexDocs(10);
shards.getPrimary().getTranslog().rollGeneration();
getTranslog(shards.getPrimary()).rollGeneration();
shards.flush();
shards.indexDocs(10);
final IndexShard replica = shards.addReplica();

@@ -99,7 +99,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
releaseRecovery.countDown();
future.get();
// rolling/flushing is async
assertBusy(() -> assertThat(replica.getTranslog().totalOperations(), equalTo(0)));
assertBusy(() -> assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(0)));
}
}

@@ -123,7 +123,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {

// delete #1
orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id", VersionType.EXTERNAL, u -> {});
orgReplica.getTranslog().rollGeneration(); // isolate the delete in it's own generation
getTranslog(orgReplica).rollGeneration(); // isolate the delete in it's own generation
// index #0
orgReplica.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON), u -> {});

@@ -167,7 +167,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
shards.recoverReplica(newReplica);
shards.assertAllEqual(3);

assertThat(newReplica.getTranslog().totalOperations(), equalTo(translogOps));
assertThat(newReplica.estimateTranslogOperationsFromMinSeq(0), equalTo(translogOps));
}
}

@@ -184,7 +184,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {

IndexShard replica = shards.getReplicas().get(0);
final String historyUUID = replica.getHistoryUUID();
Translog.TranslogGeneration translogGeneration = replica.getTranslog().getGeneration();
Translog.TranslogGeneration translogGeneration = getTranslog(replica).getGeneration();
shards.removeReplica(replica);
replica.close("test", false);
IndexWriterConfig iwc = new IndexWriterConfig(null)

@@ -219,7 +219,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
shards.recoverReplica(newReplica);
// file based recovery should be made
assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty()));
assertThat(newReplica.getTranslog().totalOperations(), equalTo(numDocs));
assertThat(newReplica.estimateTranslogOperationsFromMinSeq(0), equalTo(numDocs));

// history uuid was restored
assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID));

@@ -238,7 +238,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
}
final IndexShard replica = shards.addReplica();
shards.recoverReplica(replica);
assertThat(replica.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(numDocs - 1));
assertThat(replica.getLastSyncedGlobalCheckpoint(), equalTo(numDocs - 1));
}
}

@@ -291,7 +291,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
final IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId());
shards.recoverReplica(newReplica);

try (Translog.Snapshot snapshot = newReplica.getTranslog().newSnapshot()) {
try (Translog.Snapshot snapshot = getTranslog(newReplica).newSnapshot()) {
assertThat("Sequence based recovery should keep existing translog", snapshot, SnapshotMatchers.size(initDocs + moreDocs));
}
assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedDocs + moreDocs));

@@ -321,7 +321,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
shards.recoverReplica(replica);
// Make sure the flushing will eventually be completed (eg. `shouldPeriodicallyFlush` is false)
assertBusy(() -> assertThat(getEngine(replica).shouldPeriodicallyFlush(), equalTo(false)));
assertThat(replica.getTranslog().totalOperations(), equalTo(numDocs));
assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(numDocs));
shards.assertAllEqual(numDocs);
}
}

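Throughout these tests the translog is now reached via the static getTranslog(...) helper instead of IndexShard#getTranslog, but the snapshot idiom is unchanged. A sketch of that idiom, assuming an IndexShard named shard inside a test extending IndexShardTestCase:

    // Snapshots hold translog generations open, so always release them.
    try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) {
        Translog.Operation op;
        while ((op = snapshot.next()) != null) {
            // inspect op.seqNo() and friends here
        }
    }
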
@@ -68,8 +68,8 @@ import static org.hamcrest.Matchers.is;
import static org.hamcrest.core.IsNull.notNullValue;
import static org.hamcrest.core.IsNull.nullValue;

@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456")
@ESIntegTestCase.SuiteScopeTestCase
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456")
public class MovAvgIT extends ESIntegTestCase {
private static final String INTERVAL_FIELD = "l_value";
private static final String VALUE_FIELD = "v_value";

@@ -1296,7 +1296,7 @@
} else {
assertThat("[_count] movavg is null", countMovAvg, notNullValue());
assertEquals("[_count] movavg does not match expected [" + countMovAvg.value() + " vs " + expectedCount + "]",
countMovAvg.value(), expectedCount, 0.1);
countMovAvg.value(), expectedCount, 0.1 * Math.abs(countMovAvg.value()));
}

// This is a gap bucket

@@ -1308,7 +1308,7 @@
} else {
assertThat("[value] movavg is null", valuesMovAvg, notNullValue());
assertEquals("[value] movavg does not match expected [" + valuesMovAvg.value() + " vs " + expectedValue + "]",
valuesMovAvg.value(), expectedValue, 0.1);
valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(countMovAvg.value()));
}
}

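The epsilon change above swaps a fixed absolute tolerance for one scaled to the expected value, which is the right comparison when moving-average magnitudes vary widely between buckets. The idea as a plain JUnit assertion (the 0.1 factor mirrors the test):

    // Accept up to 10% relative error rather than a fixed 0.1 absolute error.
    assertEquals(expected, actual, 0.1 * Math.abs(expected));
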
@@ -85,10 +85,6 @@ public class FixedThreadPoolTests extends ESThreadPoolTestCase {

assertThat(counter, equalTo(rejections));
assertThat(stats(threadPool, threadPoolName).getRejected(), equalTo(rejections));

if (threadPoolName.equals(ThreadPool.Names.INDEX)) {
assertSettingDeprecationsAndWarnings(new String[]{"thread_pool.index.queue_size", "thread_pool.index.size"});
}
} finally {
terminateThreadPoolIfNeeded(threadPool);
}

@@ -60,8 +60,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {
}
}

public void testIndexingThreadPoolsMaxSize() throws InterruptedException {
final String name = randomFrom(Names.BULK, Names.INDEX);
public void testBulkThreadPoolsMaxSize() {
final int maxSize = 1 + EsExecutors.numberOfProcessors(Settings.EMPTY);
final int tooBig = randomIntBetween(1 + maxSize, Integer.MAX_VALUE);

@@ -74,7 +73,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {
try {
tp = new ThreadPool(Settings.builder()
.put("node.name", "testIndexingThreadPoolsMaxSize")
.put("thread_pool." + name + ".size", tooBig)
.put("thread_pool." + Names.BULK + ".size", tooBig)
.build());
} finally {
terminateThreadPoolIfNeeded(tp);

@@ -84,15 +83,11 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {
assertThat(
initial,
hasToString(containsString(
"Failed to parse value [" + tooBig + "] for setting [thread_pool." + name + ".size] must be ")));

if (name.equals(Names.INDEX)) {
assertSettingDeprecationsAndWarnings(new String[] { "thread_pool.index.size" });
}
"Failed to parse value [" + tooBig + "] for setting [thread_pool." + Names.BULK + ".size] must be ")));
}

private static int getExpectedThreadPoolSize(Settings settings, String name, int size) {
if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {
if (name.equals(ThreadPool.Names.BULK)) {
return Math.min(size, EsExecutors.numberOfProcessors(settings));
} else {
return size;

@@ -120,10 +115,6 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {
assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize));
// keep alive does not apply to fixed thread pools
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L));

if (threadPoolName.equals(Names.INDEX)) {
assertSettingDeprecationsAndWarnings(new String[] { "thread_pool.index.size" });
}
} finally {
terminateThreadPoolIfNeeded(threadPool);
}

@@ -179,10 +170,6 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase {
latch.await(3, TimeUnit.SECONDS); // if this throws then ThreadPool#shutdownNow did not interrupt
assertThat(oldExecutor.isShutdown(), equalTo(true));
assertThat(oldExecutor.isTerminating() || oldExecutor.isTerminated(), equalTo(true));

if (threadPoolName.equals(Names.INDEX)) {
assertSettingDeprecationsAndWarnings(new String[] { "thread_pool.index.queue_size" });
}
} finally {
terminateThreadPoolIfNeeded(threadPool);
}

@@ -645,4 +645,10 @@ public abstract class EngineTestCase extends ESTestCase {
}
}

/**
* Exposes a translog associated with the given engine for testing purpose.
*/
public static Translog getTranslog(Engine engine) {
return engine.getTranslog();
}
}

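This helper, together with IndexShardTestCase#getTranslog below, becomes the single sanctioned backdoor for tests now that production code must reach the translog through the engine. A sketch of a call site, assuming a started engine in a test extending EngineTestCase:

    // Test-only access; production callers stay behind the engine/shard API.
    Translog translog = EngineTestCase.getTranslog(engine);
    assertFalse(translog.syncNeeded());
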
@@ -55,6 +55,7 @@ import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.query.DisabledQueryCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.EngineTestCase;
import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MapperService;

@@ -67,6 +68,7 @@ import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;

@@ -644,6 +646,10 @@ public abstract class IndexShardTestCase extends ESTestCase {
return indexShard.getEngine();
}

public static Translog getTranslog(IndexShard shard) {
return EngineTestCase.getTranslog(getEngine(shard));
}

public static ReplicationTracker getReplicationTracker(IndexShard indexShard) {
return indexShard.getReplicationTracker();
}

@@ -76,6 +76,7 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardTestCase;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;

@@ -1158,7 +1159,7 @@ public final class InternalTestCluster extends TestCluster {
for (IndexService indexService : indexServices) {
for (IndexShard indexShard : indexService) {
try {
indexShard.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs();
IndexShardTestCase.getTranslog(indexShard).getDeletionPolicy().assertNoOpenTranslogRefs();
} catch (AlreadyClosedException ok) {
// all good
}

@@ -54,7 +54,7 @@ public class VersionUtils {

Version last = versions.remove(versions.size() - 1);
assert last.equals(current) : "The highest version must be the current one "
+ "but was [" + versions.get(versions.size() - 1) + "] and current was [" + current + "]";
+ "but was [" + last + "] and current was [" + current + "]";

if (current.revision != 0) {
/* If we are in a stable branch there should be no unreleased version constants

@@ -28,9 +28,9 @@ import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;

import static java.util.Collections.singletonList;
import static java.util.stream.Collectors.toCollection;
import static java.util.stream.Collectors.toList;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

@@ -305,6 +305,24 @@ public class VersionUtilsTests extends ESTestCase {
TestNewMinorBranchIn6x.V_6_2_0)));
}

public static class TestIncorrectCurrentVersion {
public static final Version V_5_3_0 = Version.fromString("5.3.0");
public static final Version V_5_3_1 = Version.fromString("5.3.1");
public static final Version V_5_4_0 = Version.fromString("5.4.0");
public static final Version V_5_4_1 = Version.fromString("5.4.1");
public static final Version CURRENT = V_5_4_1;
}

public void testIncorrectCurrentVersion() {
Version previousVersion = TestIncorrectCurrentVersion.V_5_4_0;
AssertionError error = expectThrows(AssertionError.class, () ->
VersionUtils.resolveReleasedVersions(previousVersion, TestIncorrectCurrentVersion.class));

String message = error.getMessage();
assertThat(message, containsString(TestIncorrectCurrentVersion.CURRENT.toString()));
assertThat(message, containsString(previousVersion.toString()));
}

/**
* Tests that {@link Version#minimumCompatibilityVersion()} and {@link VersionUtils#allReleasedVersions()}
* agree with the list of wire and index compatible versions we build in gradle.