Merge branch 'master' into index-lifecycle
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
/Users/colings86/dev/work/git/elasticsearch/.git/worktrees/elasticsearch-ilm/MERGE_HEAD
buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
docs/reference/migration/migrate_7_0/settings.asciidoc
modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java
modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java
modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java
modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java
modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java
modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java
modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java
modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java
modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java
modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java -> modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java
modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java
modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java -> modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java
plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java
plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java
plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java
plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java
plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java
plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java
plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java
plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java
plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java
plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java
plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java
modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java -> plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioPipeliningIT.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/VMTestRunner.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileMatcher.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java
server/src/main/java/org/elasticsearch/client/transport/TransportClient.java
server/src/main/java/org/elasticsearch/cluster/ClusterModule.java
server/src/main/java/org/elasticsearch/cluster/service/ClusterApplier.java
server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java
server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java
server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java
server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java
server/src/main/java/org/elasticsearch/http/HttpPipeliningAggregator.java
server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java
server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java
server/src/main/java/org/elasticsearch/transport/TcpTransport.java
server/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java
server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java
server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java
server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java
server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java
server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java
test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java
test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
x-pack/docs/en/security/auditing/event-types.asciidoc
x-pack/docs/en/security/auditing/forwarding-logs.asciidoc
x-pack/docs/en/security/auditing/output-index.asciidoc
x-pack/docs/en/security/auditing/output-logfile.asciidoc
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java
x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java
This commit is contained in:
commit 84c4598e47
@@ -512,7 +512,9 @@ into it
 vagrant ssh debian-9
 --------------------------------------------
 
-Now inside the VM, to run the https://github.com/sstephenson/bats[bats] packaging tests
+Now inside the VM, start the packaging tests from the terminal. There are two packaging
+test projects. The old ones are written with https://github.com/sstephenson/bats[bats]
+and only run on linux. To run them do
 
 --------------------------------------------
 cd $PACKAGING_ARCHIVES
@@ -524,18 +526,36 @@ sudo bats $BATS_TESTS/*.bats
 sudo bats $BATS_TESTS/20_tar_package.bats $BATS_TESTS/25_tar_plugins.bats
 --------------------------------------------
 
-To run the Java packaging tests, again inside the VM
+The new packaging tests are written in Java and run on both linux and windows. On
+linux (again, inside the VM)
 
 --------------------------------------------
-bash $PACKAGING_TESTS/run-tests.sh
+# run the full suite
+sudo bash $PACKAGING_TESTS/run-tests.sh
+
+# run specific test cases
+sudo bash $PACKAGING_TESTS/run-tests.sh \
+  org.elasticsearch.packaging.test.DefaultZipTests \
+  org.elasticsearch.packaging.test.OssZipTests
 --------------------------------------------
 
-or on Windows
+or on Windows, from a terminal running as Administrator
 
 --------------------------------------------
+# run the full suite
 powershell -File $Env:PACKAGING_TESTS/run-tests.ps1
+
+# run specific test cases
+powershell -File $Env:PACKAGING_TESTS/run-tests.ps1 `
+  org.elasticsearch.packaging.test.DefaultZipTests `
+  org.elasticsearch.packaging.test.OssZipTests
 --------------------------------------------
 
+Note that on Windows boxes when running from inside the GUI, you may have to log out and
+back in to the `vagrant` user (password `vagrant`) for the environment variables that
+locate the packaging tests and distributions to take effect, due to how vagrant provisions
+Windows machines.
+
 When you've made changes you want to test, keep the VM up and reload the tests and
 distributions inside by running (on the host)
 
@@ -237,6 +237,7 @@ def linux_common(config,
 
   config.vm.provision 'markerfile', type: 'shell', inline: <<-SHELL
     touch /etc/is_vagrant_vm
+    touch /is_vagrant_vm # for consistency between linux and windows
   SHELL
 
   # This prevents leftovers from previous tests using the
@@ -11,6 +11,7 @@ import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency
 import org.gradle.api.tasks.Copy
 import org.gradle.api.tasks.Delete
 import org.gradle.api.tasks.Exec
+import org.gradle.api.tasks.StopExecutionException
 import org.gradle.api.tasks.TaskState
 
 import static java.util.Collections.unmodifiableList
@@ -52,6 +53,8 @@ class VagrantTestPlugin implements Plugin<Project> {
     static final List<String> DISTRIBUTIONS = unmodifiableList([
             'archives:tar',
+            'archives:oss-tar',
             'archives:zip',
             'archives:oss-zip',
             'packages:rpm',
+            'packages:oss-rpm',
             'packages:deb',
@@ -242,13 +245,27 @@ class VagrantTestPlugin implements Plugin<Project> {
         Task createLinuxRunnerScript = project.tasks.create('createLinuxRunnerScript', FileContentsTask) {
             dependsOn copyPackagingTests
             file "${testsDir}/run-tests.sh"
-            contents "java -cp \"\$PACKAGING_TESTS/*\" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass}"
+            contents """\
+                if [ "\$#" -eq 0 ]; then
+                  test_args=( "${-> project.extensions.esvagrant.testClass}" )
+                else
+                  test_args=( "\$@" )
+                fi
+                java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}"
+                """
         }
         Task createWindowsRunnerScript = project.tasks.create('createWindowsRunnerScript', FileContentsTask) {
             dependsOn copyPackagingTests
             file "${testsDir}/run-tests.ps1"
+            // the use of $args rather than param() here is deliberate because the syntax for array (multivalued) parameters is likely
+            // a little trappy for those unfamiliar with powershell
             contents """\
-                java -cp "\$Env:PACKAGING_TESTS/*" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass}
+                if (\$args.Count -eq 0) {
+                  \$testArgs = @("${-> project.extensions.esvagrant.testClass}")
+                } else {
+                  \$testArgs = \$args
+                }
+                java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs
                 exit \$LASTEXITCODE
                 """
         }
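Both generated scripts implement the same dispatch: run the configured default
test class when no arguments are given, otherwise run the classes named on the
command line. A minimal Java sketch of such an entry point, assuming a JUnit 4
`JUnitCore` runner and an illustrative default class name (the commit's actual
`VMTestRunner` may differ):

[source,Java]
----
import org.junit.runner.JUnitCore;
import org.junit.runner.Result;

public class VMTestRunnerSketch {
    public static void main(String[] args) throws ClassNotFoundException {
        // Fall back to the default suite when no test classes are named,
        // mirroring the if/else blocks in run-tests.sh and run-tests.ps1.
        String[] names = args.length == 0
            ? new String[] { "org.elasticsearch.packaging.PackagingTests" } // assumed default
            : args;

        Class<?>[] classes = new Class<?>[names.length];
        for (int i = 0; i < names.length; i++) {
            classes[i] = Class.forName(names[i]);
        }

        Result result = JUnitCore.runClasses(classes);
        // Exit non-zero on failure so the ssh/winrm wrapper can detect it.
        System.exit(result.wasSuccessful() ? 0 : 1);
    }
}
----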
@@ -269,8 +286,10 @@ class VagrantTestPlugin implements Plugin<Project> {
             dependsOn copyPackagingArchives
             doFirst {
                 project.delete("${archivesDir}/upgrade_is_oss")
+                if (project.extensions.esvagrant.upgradeFromVersion.before('6.3.0')) {
+                    throw new StopExecutionException("upgrade version is before 6.3.0")
+                }
             }
-            onlyIf { project.extensions.esvagrant.upgradeFromVersion.onOrAfter('6.3.0') }
             file "${archivesDir}/upgrade_is_oss"
             contents ''
         }
@@ -525,9 +544,10 @@ class VagrantTestPlugin implements Plugin<Project> {
 
             if (LINUX_BOXES.contains(box)) {
                 javaPackagingTest.command = 'ssh'
-                javaPackagingTest.args = ['--command', 'bash "$PACKAGING_TESTS/run-tests.sh"']
+                javaPackagingTest.args = ['--command', 'sudo bash "$PACKAGING_TESTS/run-tests.sh"']
             } else {
                 javaPackagingTest.command = 'winrm'
+                // winrm commands run as administrator
                 javaPackagingTest.args = ['--command', 'powershell -File "$Env:PACKAGING_TESTS/run-tests.ps1"']
             }
 
@@ -242,6 +242,8 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
       if (it.relativePath.segments[-2] == 'bin') {
+        // bin files, wherever they are within modules (eg platform specific) should be executable
         it.mode = 0755
+      } else {
+        it.mode = 0644
       }
     }
     if (oss) {
@@ -122,6 +122,7 @@ Closure commonPackageConfig(String type, boolean oss) {
       }
       from(rootProject.projectDir) {
         include 'README.textile'
+        fileMode 0644
       }
       into('modules') {
         with copySpec {
@@ -135,6 +136,11 @@ Closure commonPackageConfig(String type, boolean oss) {
             for (int i = segments.length - 2; i > 0 && segments[i] != 'modules'; --i) {
               directory('/' + segments[0..i].join('/'), 0755)
             }
+            if (segments[-2] == 'bin') {
+              fcp.mode = 0755
+            } else {
+              fcp.mode = 0644
+            }
           }
         }
       }
@@ -153,6 +159,7 @@ Closure commonPackageConfig(String type, boolean oss) {
           include oss ? 'APACHE-LICENSE-2.0.txt' : 'ELASTIC-LICENSE.txt'
           rename { 'LICENSE.txt' }
         }
+        fileMode 0644
       }
     }
 
@@ -180,14 +187,17 @@ Closure commonPackageConfig(String type, boolean oss) {
     // ========= systemd =========
     into('/usr/lib/tmpfiles.d') {
       from "${packagingFiles}/systemd/elasticsearch.conf"
+      fileMode 0644
     }
     into('/usr/lib/systemd/system') {
       fileType CONFIG | NOREPLACE
       from "${packagingFiles}/systemd/elasticsearch.service"
+      fileMode 0644
     }
     into('/usr/lib/sysctl.d') {
       fileType CONFIG | NOREPLACE
       from "${packagingFiles}/systemd/sysctl/elasticsearch.conf"
+      fileMode 0644
     }
 
     // ========= sysV init =========
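The `0755`/`0644` values in these hunks are standard POSIX permission bits:
`0755` is `rwxr-xr-x` (files under `bin` stay executable) and `0644` is
`rw-r--r--` (regular files). A small Java sketch of the same mapping, using
only `java.nio.file` and a temporary file for illustration:

[source,Java]
----
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;

public class FileModes {
    public static void main(String[] args) throws Exception {
        // 0755 -> rwxr-xr-x: owner may write, everyone may read and execute.
        Set<PosixFilePermission> binMode = PosixFilePermissions.fromString("rwxr-xr-x");
        // 0644 -> rw-r--r--: owner may write, everyone may read.
        Set<PosixFilePermission> fileMode = PosixFilePermissions.fromString("rw-r--r--");

        Path f = Files.createTempFile("mode-demo", ".conf");
        Files.setPosixFilePermissions(f, fileMode);
        System.out.println(Files.getPosixFilePermissions(f)); // [OWNER_READ, OWNER_WRITE, GROUP_READ, OTHERS_READ]
        Files.setPosixFilePermissions(f, binMode);
        Files.delete(f);
    }
}
----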
@@ -1,172 +1,456 @@
 [[painless-casting]]
 === Casting
 
-Casting is the conversion of one type to another. Implicit casts are casts that
-occur automatically, such as during an assignment operation. Explicit casts are
-casts where you use the casting operator to explicitly convert one type to
-another. This is necessary during operations where the cast cannot be inferred.
+A cast converts the value of an original type to the equivalent value of a
+target type. An implicit cast infers the target type and automatically occurs
+during certain <<painless-operators, operations>>. An explicit cast specifies
+the target type and forcefully occurs as its own operation. Use the *cast
+operator* to specify an explicit cast.
 
-To cast to a new type, precede the expression by the new type enclosed in
-parentheses, for example
-`(int)x`.
+*Errors*
 
-The following sections specify the implicit casts that can be performed and the
-explicit casts that are allowed. The only other permitted cast is casting
-a single character `String` to a `char`.
+* If during a cast there exists no equivalent value for the target type.
+* If an implicit cast is given, but an explicit cast is required.
 
-*Grammar:*
+*Grammar*
 [source,ANTLR4]
 ----
 cast: '(' TYPE ')' expression
 ----
 
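Painless takes its cast operator directly from Java, so the grammar above can
be exercised in plain Java for comparison (illustrative, not part of this
commit):

[source,Java]
----
public class CastOperatorDemo {
    public static void main(String[] args) {
        long l = 5L;
        int i = (int) l; // explicit cast: the cast operator names the target type
        long back = i;   // implicit cast: the target type is inferred, int widens to long
        System.out.println(i + " " + back); // 5 5
    }
}
----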
-[[numeric-casting]]
-==== Numeric Casting
+*Examples*
 
-The following table shows the allowed implicit and explicit casts between
-numeric types. Read the table by row. To find out if you need to explicitly
-cast from type A to type B, find the row for type A and scan across to the
-column for type B.
+* Valid casts.
++
+[source,Painless]
+----
+<1> int i = (int)5L;
+<2> Map m = new HashMap();
+<3> HashMap hm = (HashMap)m;
+----
++
+<1> declare `int i`;
+    explicit cast `long 5` to `int 5` -> `int 5`;
+    assign `int 5` to `i`
+<2> declare `Map m`;
+    allocate `HashMap` instance -> `HashMap reference`;
+    implicit cast `HashMap reference` to `Map reference` -> `Map reference`;
+    assign `Map reference` to `m`
+<3> declare `HashMap hm`;
+    access `m` -> `Map reference`;
+    explicit cast `Map reference` to `HashMap reference` -> `HashMap reference`;
+    assign `HashMap reference` to `hm`
 
-IMPORTANT: Explicit casts between numeric types can result in some data loss. A
-smaller numeric type cannot necessarily accommodate the value from a larger
-numeric type. You might also lose precision when casting from integer types
-to floating point types.
+[[numeric-type-casting]]
+==== Numeric Type Casting
+
+A <<primitive-types, numeric type>> cast converts the value of an original
+numeric type to the equivalent value of a target numeric type. A cast between
+two numeric type values results in data loss when the value of the original
+numeric type is larger than the target numeric type can accommodate. A cast
+between an integer type value and a floating point type value can result in
+precision loss.
+
+The allowed casts for values of each numeric type are shown as a row in the
+following table:
 
 |====
 |        | byte     | short    | char     | int      | long     | float    | double
 | byte   |          | implicit | implicit | implicit | implicit | implicit | implicit
 | short  | explicit |          | explicit | implicit | implicit | implicit | implicit
 | char   | explicit | explicit |          | implicit | implicit | implicit | implicit
 | int    | explicit | explicit | explicit |          | implicit | implicit | implicit
 | long   | explicit | explicit | explicit | explicit |          | implicit | implicit
 | float  | explicit | explicit | explicit | explicit | explicit |          | implicit
 | double | explicit | explicit | explicit | explicit | explicit | explicit |
 |====
 
-Example(s)
-[source,Java]
+*Examples*
+
+* Valid numeric type casts.
++
+[source,Painless]
 ----
-int a = 1;            // Declare int variable a and set it to the literal
-                      // value 1
-long b = a;           // Declare long variable b and set it to int variable
-                      // a with an implicit cast to convert from int to long
-short c = (short)b;   // Declare short variable c, explicitly cast b to a
-                      // short, and assign b to c
-byte d = a;           // ERROR: Casting an int to a byte requires an explicit
-                      // cast
-double e = (double)a; // Explicitly cast int variable a to a double and assign
-                      // it to the double variable e. The explicit cast is
-                      // allowed, but it is not necessary.
+<1> int a = 1;
+<2> long b = a;
+<3> short c = (short)b;
+<4> double e = (double)a;
 ----
 
-[[reference-casting]]
-==== Reference Casting
-
-A reference type can be implicitly cast to another reference type as long as
-the type being cast _from_ is a descendant of the type being cast _to_. A
-reference type can be explicitly cast _to_ if the type being cast to is a
-descendant of the type being cast _from_.
-
-*Examples:*
-[source,Java]
++
+<1> declare `int a`;
+    assign `int 1` to `a`
+<2> declare `long b`;
+    access `a` -> `int 1`;
+    implicit cast `int 1` to `long 1` -> `long 1`;
+    assign `long 1` to `b`
+<3> declare `short c`;
+    access `b` -> `long 1`;
+    explicit cast `long 1` to `short 1` -> `short 1`;
+    assign `short 1` value to `c`
+<4> declare `double e`;
+    access `a` -> `int 1`;
+    explicit cast `int 1` to `double 1.0`;
+    assign `double 1.0` to `e`;
+    (note the explicit cast is extraneous since an implicit cast is valid)
++
+* Invalid numeric type casts resulting in errors.
++
+[source,Painless]
 ----
-List x;                        // Declare List variable x
-ArrayList y = new ArrayList(); // Declare ArrayList variable y and assign it a
-                               // newly allocated ArrayList [1]
-x = y;                         // Assign Arraylist y to List x using an
-                               // implicit cast
-y = (ArrayList)x;              // Explicitly cast List x to an ArrayList and
-                               // assign it to ArrayList y
-x = (List)y;                   // Set List x to ArrayList y using an explicit
-                               // cast (the explicit cast is not necessary)
-y = x;                         // ERROR: List x cannot be implicitly cast to
-                               // an ArrayList, an explicit cast is required
-Map m = y;                     // ERROR: Cannot implicitly or explicitly cast [2]
-                               // an ArrayList to a Map, no relationship
-                               // exists between the two types.
+<1> int a = 1.0; // error
+<2> int b = 2;
+<3> byte c = b; // error
 ----
-[1] `ArrayList` is a descendant of the `List` type.
-[2] `Map` is unrelated to the `List` and `ArrayList` types.
++
+<1> declare `int a`;
+    *error* -> cannot implicit cast `double 1.0` to `int 1`;
+    (note an explicit cast is valid)
+<2> declare `int b`;
+    assign `int 2` to `b`
+<3> declare `byte c`;
+    access `b` -> `int 2`;
+    *error* -> cannot implicit cast `int 2` to `byte 2`;
+    (note an explicit cast is valid)
 
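The table mirrors Java's widening and narrowing primitive conversions, so the
same rules can be checked in plain Java (illustrative):

[source,Java]
----
public class NumericCastDemo {
    public static void main(String[] args) {
        int i = 123456;
        long l = i;          // implicit: int -> long is widening (row "int" in the table)
        double d = l;        // implicit: long -> double
        byte b = (byte) i;   // explicit: int -> byte drops high bits (data loss)
        float f = (float) d; // explicit: double -> float may lose precision
        System.out.println(l + " " + d + " " + b + " " + f); // 123456 123456.0 64 123456.0
    }
}
----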
-[[def-type-casting]]
-==== def Type Casting
-All primitive and reference types can always be implicitly cast to
-`def`. While it is possible to explicitly cast to `def`, it is not necessary.
+[[reference-type-casting]]
+==== Reference Type Casting
 
-However, it is not always possible to implicitly cast a `def` to other
-primitive and reference types. An explicit cast is required if an explicit
-cast would normally be required between the non-def types.
+A <<reference-types, reference type>> cast converts the value of an original
+reference type to the equivalent value of a target reference type. An implicit
+cast between two reference type values is allowed when the original reference
+type is a descendant of the target type. An explicit cast between two reference
+type values is allowed when the original type is a descendant of the target type
+or the target type is a descendant of the original type.
 
+*Examples*
 
-*Examples:*
-[source,Java]
+* Valid reference type casts.
++
+[source,Painless]
 ----
-def x;          // Declare def variable x and set it to null
-x = 3;          // Set the def variable x to the literal 3 with an implicit
-                // cast from int to def
-double a = x;   // Declare double variable a and set it to def variable x,
-                // which contains a double
-int b = x;      // ERROR: Results in a run-time error because an explicit cast is
-                // required to cast from a double to an int
-int c = (int)x; // Declare int variable c, explicitly cast def variable x to an
-                // int, and assign x to c
+<1> List x;
+<2> ArrayList y = new ArrayList();
+<3> x = y;
+<4> y = (ArrayList)x;
+<5> x = (List)y;
 ----
++
+<1> declare `List x`;
+    assign default value `null` to `x`
+<2> declare `ArrayList y`;
+    allocate `ArrayList` instance -> `ArrayList reference`;
+    assign `ArrayList reference` to `y`;
+<3> access `y` -> `ArrayList reference`;
+    implicit cast `ArrayList reference` to `List reference` -> `List reference`;
+    assign `List reference` to `x`;
+    (note `ArrayList` is a descendant of `List`)
+<4> access `x` -> `List reference`;
+    explicit cast `List reference` to `ArrayList reference`
+    -> `ArrayList reference`;
+    assign `ArrayList reference` to `y`;
+<5> access `y` -> `ArrayList reference`;
+    explicit cast `ArrayList reference` to `List reference` -> `List reference`;
+    assign `List reference` to `x`;
+    (note the explicit cast is extraneous, and an implicit cast is valid)
++
+* Invalid reference type casts resulting in errors.
++
+[source,Painless]
+----
+<1> List x = new ArrayList();
+<2> ArrayList y = x; // error
+<3> Map m = (Map)x; // error
+----
++
+<1> declare `List x`;
+    allocate `ArrayList` instance -> `ArrayList reference`;
+    implicit cast `ArrayList reference` to `List reference` -> `List reference`;
+    assign `List reference` to `x`
+<2> declare `ArrayList y`;
+    access `x` -> `List reference`;
+    *error* -> cannot implicit cast `List reference` to `ArrayList reference`;
+    (note an explicit cast is valid since `ArrayList` is a descendant of `List`)
+<3> declare `ArrayList y`;
+    access `x` -> `List reference`;
+    *error* -> cannot explicit cast `List reference` to `Map reference`;
+    (note no cast would be valid since neither `List` nor `Map` is a descendant
+    of the other)
 
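The descendant rule is Java's up/downcast rule. One difference worth noting:
Painless rejects a cast between unrelated reference types at compile-time,
while plain Java defers the failure to run-time (illustrative sketch):

[source,Java]
----
import java.util.ArrayList;
import java.util.List;

public class ReferenceCastDemo {
    public static void main(String[] args) {
        ArrayList<String> y = new ArrayList<>();
        List<String> x = y; // implicit: ArrayList is a descendant of List
        ArrayList<String> z = (ArrayList<String>) x; // explicit downcast back to the descendant
        System.out.println(x == z); // true: all three values refer to one instance
        // Object m = (java.util.Map) x; // compiles in Java but throws
        //                               // ClassCastException at run-time;
        //                               // Painless rejects the cast outright
    }
}
----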
+[[dynamic-type-casting]]
+==== Dynamic Type Casting
+
+A <<dynamic-types, dynamic (`def`) type>> cast converts the value of an original
+`def` type to the equivalent value of any target type or converts the value of
+any original type to the equivalent value of a target `def` type.
+
+An implicit cast from any original type value to a `def` type value is always
+allowed. An explicit cast from any original type value to a `def` type value is
+always allowed but never necessary.
+
+An implicit or explicit cast from an original `def` type value to
+any target type value is allowed if and only if the cast is normally allowed
+based on the current type value the `def` type value represents.
+
+*Examples*
+
+* Valid dynamic type casts with any original type to a target `def` type.
++
+[source,Painless]
+----
+<1> def d0 = 3;
+<2> d0 = new ArrayList();
+<3> Object o = new HashMap();
+<4> def d1 = o;
+<5> int i = d1.size();
+----
++
+<1> declare `def d0`;
+    implicit cast `int 3` to `def`;
+    assign `int 3` to `d0`
+<2> allocate `ArrayList` instance -> `ArrayList reference`;
+    implicit cast `ArrayList reference` to `def` -> `def`;
+    assign `def` to `d0`
+<3> declare `Object o`;
+    allocate `HashMap` instance -> `HashMap reference`;
+    implicit cast `HashMap reference` to `Object reference`
+    -> `Object reference`;
+    assign `Object reference` to `o`
+<4> declare `def d1`;
+    access `o` -> `Object reference`;
+    implicit cast `Object reference` to `def` -> `def`;
+    assign `def` to `d1`
+<5> declare `int i`;
+    access `d1` -> `def`;
+    implicit cast `def` to `HashMap reference` -> `HashMap reference`;
+    call `size` on `HashMap reference` -> `int 0`;
+    assign `int 0` to `i`;
+    (note `def` was implicit cast to `HashMap reference` since `HashMap` is the
+    child-most descendant type value that the `def` type value
+    represents)
++
+* Valid dynamic type casts with an original `def` type to any target type.
++
+[source,Painless]
+----
+<1> def d = 1.0;
+<2> int i = (int)d;
+<3> d = 1;
+<4> float f = d;
+<5> d = new ArrayList();
+<6> List l = d;
+----
++
+<1> declare `def d`;
+    implicit cast `double 1.0` to `def` -> `def`;
+    assign `def` to `d`
+<2> declare `int i`;
+    access `d` -> `def`;
+    implicit cast `def` to `double 1.0` -> `double 1.0`;
+    explicit cast `double 1.0` to `int 1` -> `int 1`;
+    assign `int 1` to `i`;
+    (note the explicit cast is necessary since a `double` value cannot be
+    converted to an `int` value implicitly)
+<3> assign `int 1` to `d`;
+    (note the switch in the type `d` represents from `double` to `int`)
+<4> declare `float f`;
+    access `d` -> `def`;
+    implicit cast `def` to `int 1` -> `int 1`;
+    implicit cast `int 1` to `float 1.0` -> `float 1.0`;
+    assign `float 1.0` to `f`
+<5> allocate `ArrayList` instance -> `ArrayList reference`;
+    assign `ArrayList reference` to `d`;
+    (note the switch in the type `d` represents from `int` to `ArrayList`)
+<6> declare `List l`;
+    access `d` -> `def`;
+    implicit cast `def` to `ArrayList reference` -> `ArrayList reference`;
+    implicit cast `ArrayList reference` to `List reference` -> `List reference`;
+    assign `List reference` to `l`
++
+* Invalid dynamic type casts resulting in errors.
++
+[source,Painless]
+----
+<1> def d = 1;
+<2> short s = d; // error
+<3> d = new HashMap();
+<4> List l = d; // error
+----
+<1> declare `def d`;
+    implicit cast `int 1` to `def` -> `def`;
+    assign `def` to `d`
+<2> declare `short s`;
+    access `d` -> `def`;
+    implicit cast `def` to `int 1` -> `int 1`;
+    *error* -> cannot implicit cast `int 1` to `short 1`;
+    (note an explicit cast is valid)
+<3> allocate `HashMap` instance -> `HashMap reference`;
+    implicit cast `HashMap reference` to `def` -> `def`;
+    assign `def` to `d`
+<4> declare `List l`;
+    access `d` -> `def`;
+    implicit cast `def` to `HashMap reference`;
+    *error* -> cannot implicit cast `HashMap reference` to `List reference`;
+    (note no cast would be valid since neither `HashMap` nor `List` is a
+    descendant of the other)
 
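There is no exact Java analog for `def`, but `Object` plus casts gives a rough
picture of the run-time-checked behavior described above (an analogy only, not
how Painless is implemented):

[source,Java]
----
import java.util.ArrayList;

public class DefAnalogyDemo {
    public static void main(String[] args) {
        Object d = 1.0;              // like `def d = 1.0`: the static type is forgotten
        int i = (int) (double) d;    // explicit narrowing required, as in `(int)d`
        d = new ArrayList<Object>(); // the represented type may change over time
        // double x = (double) d;    // would throw ClassCastException here,
        //                           // the analog of an invalid def cast
        System.out.println(i);       // 1
    }
}
----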
+[[string-character-casting]]
+==== String to Character Casting
+
+Use the *cast operator* to convert a <<string-type, `String` type>> value into a
+<<primitive-types, `char` type>> value.
+
+*Errors*
+
+* If the `String` type value isn't one character in length.
+* If the `String` type value is `null`.
+
+*Examples*
+
+* Casting string literals into `char` type values.
++
+[source,Painless]
+----
+<1> char c = (char)"C"
+<2> c = (char)'c'
+----
++
+<1> declare `char c`;
+    explicit cast `String "C"` to `char C` -> `char C`;
+    assign `char C` to `c`
+<2> explicit cast `String 'c'` to `char c` -> `char c`;
+    assign `char c` to `c`
++
+* Casting a `String` reference into a `char` value.
++
+[source,Painless]
+----
+<1> String s = "s";
+<2> char c = (char)s;
+----
+<1> declare `String s`;
+    assign `String "s"` to `s`;
+<2> declare `char c`;
+    access `s` -> `String "s"`;
+    explicit cast `String "s"` to `char s` -> `char s`;
+    assign `char s` to `c`
 
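Java has no `String`-to-`char` cast; the closest plain-Java spelling of this
conversion is `charAt(0)` (illustrative):

[source,Java]
----
public class StringToCharDemo {
    public static void main(String[] args) {
        String s = "s";
        char c = s.charAt(0);  // Java spelling of Painless's (char)s
        System.out.println(c); // s
        // Note: "ss".charAt(0) silently returns 's' in Java, whereas Painless
        // errors when the String is not exactly one character long.
    }
}
----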
 [[boxing-unboxing]]
 ==== Boxing and Unboxing
 
-Boxing is where a cast is used to convert a primitive type to its corresponding
-reference type. Unboxing is the reverse, converting a reference type to the
-corresponding primitive type.
+Boxing is a special type of cast used to convert a primitive type to its
+corresponding reference type. Unboxing is the reverse used to convert a
+reference type to its corresponding primitive type.
 
-There are two places Painless performs implicit boxing and unboxing:
+Implicit boxing/unboxing occurs during the following operations:
 
-* When you call methods, Painless automatically boxes and unboxes arguments
-so you can specify either primitive types or their corresponding reference
-types.
-* When you use the `def` type, Painless automatically boxes and unboxes as
-needed when converting to and from `def`.
+* Conversions between a `def` type and a primitive type will be implicitly
+boxed/unboxed as necessary, though this is referred to as an implicit cast
+throughout the documentation.
+* Method/function call arguments will be implicitly boxed/unboxed as necessary.
+* A primitive type value will be implicitly boxed when a reference type method
+call is invoked on it.
 
-The casting operator does not support any way to explicitly box a primitive
-type or unbox a reference type.
+Explicit boxing/unboxing is not allowed. Use the reference type API to
+explicitly convert a primitive type value to its respective reference type
+value and vice versa.
 
-If a primitive type needs to be converted to a reference type, the Painless
-reference type API supports methods that can do that. However, under normal
-circumstances this should not be necessary.
+*Errors*
 
-*Examples:*
-[source,Java]
+* If an explicit cast is made to box/unbox a primitive type.
+
+*Examples*
+
+* Uses of implicit boxing/unboxing.
++
+[source,Painless]
 ----
-Integer x = 1;               // ERROR: not a legal implicit cast
-Integer y = (Integer)1;      // ERROR: not a legal explicit cast
-int a = new Integer(1);      // ERROR: not a legal implicit cast
-int b = (int)new Integer(1); // ERROR: not a legal explicit cast
+<1> List l = new ArrayList();
+<2> l.add(1);
+<3> Integer I = Integer.valueOf(0);
+<4> int i = l.get(i);
 ----
++
+<1> declare `List l`;
+    allocate `ArrayList` instance -> `ArrayList reference`;
+    assign `ArrayList reference` to `l`;
+<2> access `l` -> `List reference`;
+    implicit cast `int 1` to `def` -> `def`;
+    call `add` on `List reference` with arguments (`def`);
+    (note internally `int 1` is boxed to `Integer 1` to store as a `def` type
+    value)
+<3> declare `Integer I`;
+    call `valueOf` on `Integer` with arguments of (`int 0`) -> `Integer 0`;
+    assign `Integer 0` to `I`;
+<4> declare `int i`;
+    access `I` -> `Integer 0`;
+    unbox `Integer 0` -> `int 0`;
+    access `l` -> `List reference`;
+    call `get` on `List reference` with arguments (`int 0`) -> `def`;
+    implicit cast `def` to `int 1` -> `int 1`;
+    assign `int 1` to `i`;
+    (note internally `int 1` is unboxed from `Integer 1` when loaded from a
+    `def` type value)
++
+* Uses of invalid boxing/unboxing resulting in errors.
++
+[source,Painless]
+----
+<1> Integer x = 1; // error
+<2> Integer y = (Integer)1; // error
+<3> int a = Integer.valueOf(1); // error
+<4> int b = (int)Integer.valueOf(1); // error
+----
++
+<1> declare `Integer x`;
+    *error* -> cannot implicit box `int 1` to `Integer 1` during assignment
+<2> declare `Integer y`;
+    *error* -> cannot explicit box `int 1` to `Integer 1` during assignment
+<3> declare `int a`;
+    call `valueOf` on `Integer` with arguments of (`int 1`) -> `Integer 1`;
+    *error* -> cannot implicit unbox `Integer 1` to `int 1` during assignment
+<4> declare `int b`;
+    call `valueOf` on `Integer` with arguments of (`int 1`) -> `Integer 1`;
+    *error* -> cannot explicit unbox `Integer 1` to `int 1` during assignment
 
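For contrast, plain Java permits the implicit boxing that Painless forbids,
while the API route through `Integer.valueOf` works in both languages
(illustrative):

[source,Java]
----
import java.util.ArrayList;
import java.util.List;

public class BoxingDemo {
    public static void main(String[] args) {
        Integer legal = 1;                  // legal in Java, an error in Painless
        List<Integer> l = new ArrayList<>();
        l.add(1);                           // int 1 is autoboxed to Integer 1
        Integer boxed = Integer.valueOf(0); // explicit boxing through the API
        int i = l.get(boxed);               // Integer unboxes to int for the index
        System.out.println(legal + " " + i); // 1 1
    }
}
----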
 [[promotion]]
 ==== Promotion
 
-Promotion is where certain operations require types to be either a minimum
-numerical type or for two (or more) types to be equivalent.
-The documentation for each operation that has these requirements
-includes promotion tables that describe how this is handled.
+Promotion is when a single value is implicitly cast to a certain type or
+multiple values are implicitly cast to the same type as required for evaluation
+by certain operations. Each operation that requires promotion has a promotion
+table that shows all required implicit casts based on the type(s) of value(s). A
+value can be promoted to a `def` type at compile-time; however, the promoted
+type value is derived from what the `def` type value represents at run-time.
 
-When an operation promotes a type or types, the resultant type
-of the operation is the promoted type. Types can be promoted to def
-at compile-time; however, at run-time, the resultant type will be the
-promotion of the types the `def` is representing.
+*Errors*
 
-*Examples:*
-[source,Java]
+* If a specific operation cannot find an allowed promotion type for the type(s)
+of value(s) given.
+
+*Examples*
+
+* Uses of promotion.
++
+[source,Painless]
 ----
-2 + 2.0     // Add the literal int 2 and the literal double 2.0. The literal
-            // 2 is promoted to a double and the resulting value is a double.
-
-def x = 1;  // Declare def variable x and set it to the literal int 1 through
-            // an implicit cast
-x + 2.0F    // Add def variable x and the literal float 2.0.
-            // At compile-time the types are promoted to def.
-            // At run-time the types are promoted to float.
+<1> double d = 2 + 2.0;
+<2> def x = 1;
+<3> float f = x + 2.0F;
 ----
+<1> declare `double d`;
+    promote `int 2` and `double 2.0 @0` -> `double 2.0 @0`;
+    implicit cast `int 2` to `double 2.0 @1` -> `double 2.0 @1`;
+    add `double 2.0 @1` and `double 2.0 @0` -> `double 4.0`;
+    assign `double 4.0` to `d`
+<2> declare `def x`;
+    implicit cast `int 1` to `def` -> `def`;
+    assign `def` to `x`;
+<3> declare `float f`;
+    access `x` -> `def`;
+    implicit cast `def` to `int 1` -> `int 1`;
+    promote `int 1` and `float 2.0` -> `float 2.0`;
+    implicit cast `int 1` to `float 1.0` -> `float 1.0`;
+    add `float 1.0` and `float 2.0` -> `float 3.0`;
+    assign `float 3.0` to `f`;
+    (note this example illustrates promotion done at run-time as promotion
+    done at compile-time would have resolved to a `def` type value)
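The compile-time half of these rules is Java's binary numeric promotion, which
can be observed directly in plain Java (illustrative):

[source,Java]
----
public class PromotionDemo {
    public static void main(String[] args) {
        double d = 2 + 2.0; // int 2 is promoted to double before the add
        int i = 1;
        float f = i + 2.0F; // int 1 is promoted to float; the result type is float
        System.out.println(d + " " + f); // 4.0 3.0
    }
}
----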
@@ -1,12 +1,12 @@
 [[painless-comments]]
 === Comments
 
-Use the `//` token anywhere on a line to specify a single-line comment. All
-characters from the `//` token to the end of the line are ignored. Use an
-opening `/*` token and a closing `*/` token to specify a multi-line comment.
-Multi-line comments can start anywhere on a line, and all characters in between
-the `/*` token and `*/` token are ignored. Comments can be included anywhere
-within a script.
+Use a comment to annotate or explain code within a script. Use the `//` token
+anywhere on a line to specify a single-line comment. All characters from the
+`//` token to the end of the line are ignored. Use an opening `/*` token and a
+closing `*/` token to specify a multi-line comment. Multi-line comments can
+start anywhere on a line, and all characters in between the `/*` token and `*/`
+token are ignored. Comments can be included anywhere within a script.
 
 *Grammar*
 [source,ANTLR4]
@@ -1,10 +1,10 @@
 [[painless-identifiers]]
 === Identifiers
 
-Specify identifiers to <<declaration, declare>>, <<assignment, assign>>, and
-<<painless-operators, use>> variables, <<dot-operator, access fields>>, and
-<<dot-operator, call methods>>. <<painless-keywords, Keywords>> and
-<<painless-types, types>> cannot be used as identifiers.
+Use an identifier as a named token to specify a
+<<painless-variables, variable>>, <<painless-types, type>>,
+<<dot-operator, field>>, <<dot-operator, method>>, or function.
+<<painless-keywords, Keywords>> cannot be used as identifiers.
 
 *Grammar*
 [source,ANTLR4]
@@ -1,9 +1,9 @@
 [[painless-keywords]]
 === Keywords
 
-The keywords in the table below are reserved for built-in language
-features. These keywords cannot be used as
-<<painless-identifiers, identifiers>> or <<painless-types, types>>.
+Keywords are reserved tokens for built-in language features and cannot be used
+as <<painless-identifiers, identifiers>> within a script. The following are
+keywords:
 
 [cols="^1,^1,^1,^1,^1"]
 |====
@@ -6,7 +6,7 @@ Painless syntax is similar to Java syntax along with some additional
 features such as dynamic typing, Map and List accessor shortcuts, and array
 initializers. As a direct comparison to Java, there are some important
 differences, especially related to the casting model. For more detailed
-conceptual information about the basic constructs that Java and Painless share,
+conceptual information about the basic constructs that Painless and Java share,
 refer to the corresponding topics in the
 https://docs.oracle.com/javase/specs/jls/se8/html/index.html[Java Language
 Specification].
@@ -1,18 +1,19 @@
 [[painless-literals]]
 === Literals
 
-Use literals to specify different types of values directly in a script.
+Use a literal to specify a value directly in an
+<<painless-operators, operation>>.
 
 [[integers]]
 ==== Integers
 
-Use integer literals to specify an integer value in decimal, octal, or hex
-notation of the <<primitive-types, primitive types>> `int`, `long`, `float`,
+Use an integer literal to specify an integer type value in decimal, octal, or
+hex notation of a <<primitive-types, primitive type>> `int`, `long`, `float`,
 or `double`. Use the following single letter designations to specify the
-<<primitive-types, primitive type>>: `l` or `L` for `long`, `f` or `F` for
-`float`, and `d` or `D` for `double`. If not specified, the type defaults to
-`int`. Use `0` as a prefix to specify an integer literal as octal, and use
-`0x` or `0X` as a prefix to specify an integer literal as hex.
+primitive type: `l` or `L` for `long`, `f` or `F` for `float`, and `d` or `D`
+for `double`. If not specified, the type defaults to `int`. Use `0` as a prefix
+to specify an integer literal as octal, and use `0x` or `0X` as a prefix to
+specify an integer literal as hex.
 
 *Grammar*
 [source,ANTLR4]
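The suffixes and prefixes described here are identical to Java's, so the rules
can be demonstrated in plain Java (illustrative):

[source,Java]
----
public class LiteralDemo {
    public static void main(String[] args) {
        int dec = 42;   // decimal; integer literals default to int
        long big = 42L; // 'L' designates long
        int oct = 052;  // leading 0 designates octal (decimal 42)
        int hex = 0x2A; // '0x' prefix designates hex (decimal 42)
        System.out.println(dec + " " + big + " " + oct + " " + hex); // 42 42 42 42
    }
}
----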
@@ -46,11 +47,10 @@ HEX: '-'? '0' [xX] [0-9a-fA-F]+ [lL]?;
 [[floats]]
 ==== Floats
 
-Use floating point literals to specify a floating point value of the
-<<primitive-types, primitive types>> `float` or `double`. Use the following
-single letter designations to specify the <<primitive-types, primitive type>>:
-`f` or `F` for `float` and `d` or `D` for `double`. If not specified, the type defaults
-to `double`.
+Use a floating point literal to specify a floating point type value of a
+<<primitive-types, primitive type>> `float` or `double`. Use the following
+single letter designations to specify the primitive type: `f` or `F` for `float`
+and `d` or `D` for `double`. If not specified, the type defaults to `double`.
 
 *Grammar*
 [source,ANTLR4]
@@ -81,7 +81,7 @@ EXPONENT: ( [eE] [+\-]? [0-9]+ );
 [[strings]]
 ==== Strings
 
-Use string literals to specify <<string-type, String>> values with
+Use a string literal to specify a <<string-type, `String` type>> value with
 either single-quotes or double-quotes. Use a `\"` token to include a
 double-quote as part of a double-quoted string literal. Use a `\'` token to
 include a single-quote as part of a single-quoted string literal. Use a `\\`
@@ -117,26 +117,6 @@ STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' )
 [[characters]]
 ==== Characters
 
-Use the <<painless-casting, casting operator>> to convert string literals or
-<<string-type, String>> values into <<primitive-types, char>> values.
-<<string-type, String>> values converted into
-<<primitive-types, char>> values must be exactly one character in length
-or an error will occur.
-
-*Examples*
-
-* Casting string literals into <<primitive-types, char>> values.
-+
-[source,Painless]
-----
-(char)"C"
-(char)'c'
-----
-+
-* Casting a <<string-type, String>> value into a <<primitive-types, char>> value.
-+
-[source,Painless]
-----
-String s = "s";
-char c = (char)s;
-----
+A character literal cannot be specified directly. Instead, use the
+<<string-character-casting, cast operator>> to convert a `String` type value
+into a `char` type value.
@@ -240,6 +240,7 @@ operator. See Function Calls [MARK] for more information.
 The brackets operator `[]` is used to create and access arrays, lists, and maps.
 The braces operator `{}` is used to intialize arrays.
 
+[[array-initialization]]
 ===== Creating and Initializing Arrays
 
 You create and initialize arrays using the brackets `[]` and braces `{}`
@@ -248,9 +249,49 @@ initialize each dimension with are specified as a comma-separated list enclosed
 in braces. For example, `new int[] {1, 2, 3}` creates a one dimensional `int`
 array with a size of 3 and the values 1, 2, and 3.
 
-For more information about allocating and initializing arrays, see <<array-type,
-Array Type>>.
+To allocate an array, you use the `new` keyword followed by the type and a
+set of brackets for each dimension. You can explicitly define the size of each
+dimension by specifying an expression within the brackets, or initialize each
+dimension with the desired number of values. The allocated size of each
+dimension is its permanent size.
+
+To initialize an array, specify the values you want to initialize
+each dimension with as a comma-separated list of expressions enclosed in braces.
+For example, `new int[] {1, 2, 3}` creates a one-dimensional `int` array with a
+size of 3 and the values 1, 2, and 3.
+
+When you initialize an array, the order of the expressions is maintained. Each
+expression used as part of the initialization is converted to the
+array's type. An error occurs if the types do not match.
+
+*Grammar:*
+[source,ANTLR4]
+----
+declare_array: TYPE ('[' ']')+;
+
+array_initialization: 'new' TYPE '[' ']' '{' expression (',' expression) '}'
+                    | 'new' TYPE '[' ']' '{' '}';
+----
+
+*Examples:*
+[source,Java]
+----
+int[] x = new int[5];          // Declare int array x and assign it a newly
+                               // allocated int array with a size of 5
+def[][] y = new def[5][5];     // Declare the 2-dimensional def array y and
+                               // assign it a newly allocated 2-dimensional
+                               // array where both dimensions have a size of 5
+int[] x = new int[] {1, 2, 3}; // Declare int array x and set it to an int
+                               // array with values 1, 2, 3 and a size of 3
+int i = 1;
+long l = 2L;
+float f = 3.0F;
+double d = 4.0;
+String s = "5";
+def[] da = new def[] {i, l, f*d, s}; // Declare def array da and set it to
+                                     // a def array with a size of 4 and the
+                                     // values i, l, f*d, and s
+----
+
+[[array-access]]
 ===== Accessing Array Elements
 
 Elements in an array are stored and accessed using the brackets `[]` operator.
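Aside from the `def` element type, the bracket and brace forms above are
Java's array syntax; a compilable plain-Java sketch (with distinct variable
names, since the doc example reuses `x`):

[source,Java]
----
public class ArrayDemo {
    public static void main(String[] args) {
        int[] x = new int[5];          // size fixed at allocation
        int[][] y = new int[5][5];     // two dimensions, each of size 5
        int[] z = new int[] {1, 2, 3}; // initializer: size 3, values kept in order
        System.out.println(z.length + " " + z[0] + " " + y.length); // 3 1 5
    }
}
----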
@@ -298,6 +339,7 @@ return d[z]; // Access the 1st element of array d using the
 NOTE: The use of the `def` type in the second example means that the types
 cannot be resolved until runtime.
 
+[[array-length]]
 ===== Array Length
 
 Arrays contain a special member known as 'length' that is a read-only value that contains the size of the array. This member can be accessed from an array using the dot operator.
@@ -727,6 +769,7 @@ def e; // declares the def variable e
 e = new HashMap(m); // sets e to a newly allocated HashMap using the constructor with a single argument m
 ----
 
+[[new-array]]
 ==== New Array
 
 An array type instance can be allocated using the new operator. The format starts with the new operator followed by the type followed by a series of opening and closing braces each containing an expression for the size of the dimension.
@@ -1,269 +1,456 @@
 [[painless-types]]
 === Types
 
-Painless supports both dynamic and static types. Static types are split into
-_primitive types_ and _reference types_.
-
-[[dynamic-types]]
-==== Dynamic Types
-
-Painless supports one dynamic type: `def`. The `def` type can represent any
-primitive or reference type. When you use the `def` type, it mimics the exact
-behavior of whatever type it represents at runtime. The default value for the
-def type is `null.`
-
-Internally, if the `def` type represents a primitive type, it is converted to the
-corresponding reference type. It still behaves like the primitive type, however,
-including within the casting model. The `def` type can be assigned to different
-types during the course of script execution.
-
-IMPORTANT: Because a `def` type variable can be assigned to different types
-during execution, type conversion errors that occur when using the `def` type
-happen at runtime.
-
-Using the `def` type can have a slight impact on performance. If performance is
-critical, it's better to declare static types.
-
-*Examples:*
-[source,Java]
-----
-def x = 1;               // Declare def variable x and set it to the
-                         // literal int 1
-def l = new ArrayList(); // Declare def variable l and set it a newly
-                         // allocated ArrayList
-----
+A type is a classification of data used to define the properties of a value.
+These properties specify what data a value represents and the rules for how a
+value is evaluated during an <<painless-operators, operation>>. Each type
+belongs to one of the following categories: <<primitive-types, primitive>>,
+<<reference-types, reference>>, or <<dynamic-types, dynamic>>.
 
 [[primitive-types]]
 ==== Primitive Types
 
-Primitive types are allocated directly onto the stack according to the standard
-Java memory model.
+A primitive type represents basic data built natively into the JVM and is
+allocated to non-heap memory. Declare a primitive type
+<<painless-variables, variable>>, and assign it a primitive type value for
+evaluation during later operations. The default value for a newly-declared
+primitive type variable is listed as part of the definitions below. A primitive
+type value is copied during an assignment or as an argument for a
+method/function call.
 
-Primitive types can behave as their corresponding (<<boxing-unboxing, boxed>>)
-reference type. This means any piece of a reference type can be accessed or
-called through the primitive type. Operations performed in this manner convert
-the primitive type to its corresponding reference type at runtime and perform
-the field access or method call without needing to perform any other
-operations.
+A primitive type has a corresponding reference type (also known as a boxed
+type). Use the <<field-access, field access operator>> or
+<<method-access, method call operator>> on a primitive type value to force
+evaluation as its corresponding reference type value.
 
-Painless supports the following primitive types.
+The following primitive types are available:
 
-byte::
-An 8-bit, signed, two's complement integer.
-Range: [-128, 127].
-Default value: 0.
-Reference type: Byte.
+[horizontal]
+`byte`::
+8-bit, signed, two's complement integer
+* range: [`-128`, `127`]
+* default value: `0`
+* reference type: `Byte`
 
-short::
-A 16-bit, signed, two's complement integer.
-Range: [-32768, 32767].
-Default value: 0.
-Reference type: Short.
+`short`::
+16-bit, signed, two's complement integer
+* range: [`-32768`, `32767`]
+* default value: `0`
+* reference type: `Short`
 
-char::
-A 16-bit Unicode character.
-Range: [0, 65535].
-Default value: 0 or `\u0000`.
-Reference type: Character.
+`char`::
+16-bit, unsigned, Unicode character
+* range: [`0`, `65535`]
+* default value: `0` or `\u0000`
+* reference type: `Character`
 
-int::
-A 32-bit, signed, two's complement integer.
-Range: [-2^32, 2^32-1].
-Default value: 0.
-Reference type: Integer.
+`int`::
+32-bit, signed, two's complement integer
+* range: [`-2^32`, `2^32-1`]
+* default value: `0`
+* reference type: `Integer`
 
-long::
-A 64-bit, signed, two's complement integer.
-Range: [-2^64, 2^64-1].
-Default value: 0.
-Reference type: Long.
+`long`::
+64-bit, signed, two's complement integer
+* range: [`-2^64`, `2^64-1`]
+* default value: `0`
+* reference type: `Long`
 
-float::
-A 32-bit, single-precision, IEEE 754 floating point number.
-Range: Depends on multiple factors.
-Default value: 0.0.
-Reference type: Float.
+`float`::
+32-bit, signed, single-precision, IEEE 754 floating point number
+* default value: `0.0`
+* reference type: `Float`
 
-double::
-A 64-bit, double-precision, IEEE 754 floating point number.
-Range: Depends on multiple factors.
-Default value: 0.0.
-Reference type: Double.
+`double`::
+64-bit, signed, double-precision, IEEE 754 floating point number
+* default value: `0.0`
+* reference type: `Double`
 
-boolean::
-A logical quanity with two possible values: true and false.
-Range: true/false.
-Default value: false.
-Reference type: Boolean.
+`boolean`::
+logical quantity with two possible values of `true` and `false`
+* default value: `false`
+* reference type: `Boolean`
 
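These definitions match the JVM's primitives and can be verified in plain
Java. Note one discrepancy kept verbatim from the diff: the `int` and `long`
ranges read `2^32` and `2^64` above, but the JVM's actual `int` range is
[-2^31, 2^31-1] and `long` is [-2^63, 2^63-1], as the constants below show
(illustrative):

[source,Java]
----
public class PrimitiveInfoDemo {
    static int defaultInt;      // fields get the documented defaults;
    static boolean defaultBool; // locals must be assigned before use

    public static void main(String[] args) {
        System.out.println(Byte.MIN_VALUE + ".." + Byte.MAX_VALUE);       // -128..127
        System.out.println(Short.MIN_VALUE + ".." + Short.MAX_VALUE);     // -32768..32767
        System.out.println((int) Character.MIN_VALUE + ".." + (int) Character.MAX_VALUE); // 0..65535
        System.out.println(Integer.MIN_VALUE + ".." + Integer.MAX_VALUE); // -2^31..2^31-1
        System.out.println(Long.MIN_VALUE + ".." + Long.MAX_VALUE);       // -2^63..2^63-1
        System.out.println(defaultInt + " " + defaultBool);               // 0 false
    }
}
----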
*Examples*

* Primitive types used in declaration, declaration and assignment.
+
[source,Painless]
----
<1> int i = 1;
<2> double d;
<3> boolean b = true;
----
+
<1> declare `int i`;
    assign `int 1` to `i`
<2> declare `double d`;
    assign default `double 0.0` to `d`
<3> declare `boolean b`;
    assign `boolean true` to `b`
+
* Method call on a primitive type using the corresponding reference type.
+
[source,Painless]
----
<1> int i = 1;
<2> i.toString();
----
+
<1> declare `int i`;
    assign `int 1` to `i`
<2> access `i` -> `int 1`;
    box `int 1` -> `Integer 1 reference`;
    call `toString` on `Integer 1 reference` -> `String '1'`

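Because Painless mirrors Java's primitive semantics, the documented ranges and
the boxing behavior above can be checked with a plain Java program. The
following is an illustrative sketch, not part of the reference itself:

[source,Java]
----
// Illustrative Java sketch: prints the documented ranges of the integer
// primitives and shows a reference type method invoked on a boxed primitive.
public class PrimitiveRanges {
    public static void main(String[] args) {
        System.out.println("byte:  [" + Byte.MIN_VALUE + ", " + Byte.MAX_VALUE + "]");
        System.out.println("short: [" + Short.MIN_VALUE + ", " + Short.MAX_VALUE + "]");
        System.out.println("char:  [0, " + (int) Character.MAX_VALUE + "]");
        System.out.println("int:   [" + Integer.MIN_VALUE + ", " + Integer.MAX_VALUE + "]");
        System.out.println("long:  [" + Long.MIN_VALUE + ", " + Long.MAX_VALUE + "]");

        int i = 1;
        Integer boxed = i;                     // autoboxing: int -> Integer
        System.out.println(boxed.toString()); // prints "1", like i.toString() in Painless
    }
}
----
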
[[reference-types]]
==== Reference Types

A reference type is a named construct (object), potentially representing
multiple pieces of data (member fields) and logic to manipulate that data
(member methods), defined as part of the application programming interface
(API) for scripts.

A reference type instance is a single set of data for one reference type
object allocated to the heap. Use the
<<constructor-call, new instance operator>> to allocate a reference type
instance. Use a reference type instance to load from, store to, and manipulate
complex data.

A reference type value refers to a reference type instance, and multiple
reference type values may refer to the same reference type instance. A change
to a reference type instance will affect all reference type values referring
to that specific instance.

Declare a reference type <<painless-variables, variable>>, and assign it a
reference type value for evaluation during later operations. The default value
for a newly-declared reference type variable is `null`. A reference type value
is shallow-copied during an assignment or as an argument for a method/function
call. Assign `null` to a reference type variable to indicate the reference type
value refers to no reference type instance. The JVM will garbage collect a
reference type instance when it is no longer referred to by any reference type
values. Pass `null` as an argument to a method/function call to indicate the
argument refers to no reference type instance.

A reference type object defines zero-to-many of each of the following:

static member field::

A static member field is a named and typed piece of data. Each reference type
*object* contains one set of data representative of its static member fields.
Use the <<field-access, field access operator>> in correspondence with the
reference type object name to access a static member field for loading and
storing to a specific reference type *object*. No reference type instance
allocation is necessary to use a static member field.

non-static member field::

A non-static member field is a named and typed piece of data. Each reference
type *instance* contains one set of data representative of its reference type
object's non-static member fields. Use the
<<field-access, field access operator>> for loading and storing to a non-static
member field of a specific reference type *instance*. An allocated reference
type instance is required to use a non-static member field.

static member method::

A static member method is a function called on a reference type *object*. Use
the <<method-access, method call operator>> in correspondence with the reference
type object name to call a static member method. No reference type instance
allocation is necessary to use a static member method.

non-static member method::

A non-static member method is a function called on a reference type *instance*.
A non-static member method called on a reference type instance can load from
and store to non-static member fields of that specific reference type instance.
Use the <<method-access, method call operator>> in correspondence with a
specific reference type instance to call a non-static member method. An
allocated reference type instance is required to use a non-static member
method.

constructor::

A constructor is a special type of function used to allocate a reference type
*instance* defined by a specific reference type *object*. Use the
<<constructor-call, new instance operator>> to allocate a reference type
instance.

A reference type object follows a basic inheritance model. Consider types A and
B. Type A is considered to be a parent of B, and B a child of A, if B inherits
(is able to access as its own) all of A's non-static members. Type B is
considered a descendant of A if there exists a recursive parent-child
relationship from B to A with none to many types in between. In this case, B
inherits all of A's non-static members along with all of the non-static members
of the types in between. Type B is also considered to be a type A in both
relationships.

For the complete list of Painless reference types and their supported methods,
see the https://www.elastic.co/guide/en/elasticsearch/reference/current/painless-api-reference.html[Painless API Reference].

*Examples*

* Reference types evaluated in several different operations.
+
[source,Painless]
----
<1> List l = new ArrayList();
<2> l.add(1);
<3> int i = l.get(0) + 2;
----
+
<1> declare `List l`;
    allocate `ArrayList` instance -> `ArrayList reference`;
    implicit cast `ArrayList reference` to `List reference` -> `List reference`;
    assign `List reference` to `l`
<2> access `l` -> `List reference`;
    implicit cast `int 1` to `def` -> `def`;
    call `add` on `List reference` with arguments (`def`)
<3> declare `int i`;
    access `l` -> `List reference`;
    call `get` on `List reference` with arguments (`int 0`) -> `def`;
    implicit cast `def` to `int 1` -> `int 1`;
    add `int 1` and `int 2` -> `int 3`;
    assign `int 3` to `i`
+
* Sharing a reference type instance.
+
[source,Painless]
----
<1> List l0 = new ArrayList();
<2> List l1 = l0;
<3> l0.add(1);
<4> l1.add(2);
<5> int i = l1.get(0) + l0.get(1);
----
+
<1> declare `List l0`;
    allocate `ArrayList` instance -> `ArrayList reference`;
    implicit cast `ArrayList reference` to `List reference` -> `List reference`;
    assign `List reference` to `l0`
<2> declare `List l1`;
    access `l0` -> `List reference`;
    assign `List reference` to `l1`
    (note `l0` and `l1` refer to the same instance known as a shallow-copy)
<3> access `l0` -> `List reference`;
    implicit cast `int 1` to `def` -> `def`;
    call `add` on `List reference` with arguments (`def`)
<4> access `l1` -> `List reference`;
    implicit cast `int 2` to `def` -> `def`;
    call `add` on `List reference` with arguments (`def`)
<5> declare `int i`;
    access `l1` -> `List reference`;
    call `get` on `List reference` with arguments (`int 0`) -> `def @0`;
    implicit cast `def @0` to `int 1` -> `int 1`;
    access `l0` -> `List reference`;
    call `get` on `List reference` with arguments (`int 1`) -> `def @1`;
    implicit cast `def @1` to `int 2` -> `int 2`;
    add `int 1` and `int 2` -> `int 3`;
    assign `int 3` to `i`
+
* Using the static members of a reference type.
+
[source,Painless]
----
<1> int i = Integer.MAX_VALUE;
<2> long l = Long.parseLong("123");
----
+
<1> declare `int i`;
    access `MAX_VALUE` on `Integer` -> `int 2147483647`;
    assign `int 2147483647` to `i`
<2> declare `long l`;
    call `parseLong` on `Long` with arguments (`String "123"`) -> `long 123`;
    assign `long 123` to `l`

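The shared-instance behavior described above is ordinary JVM reference
semantics, so the sharing example can be reproduced in plain Java; an
illustrative sketch:

[source,Java]
----
// Illustrative Java sketch of the shared-instance example; l0 and l1 are two
// reference type values referring to one ArrayList instance.
import java.util.ArrayList;
import java.util.List;

public class SharedInstance {
    public static void main(String[] args) {
        List<Integer> l0 = new ArrayList<>();
        List<Integer> l1 = l0;            // shallow copy: same instance
        l0.add(1);
        l1.add(2);
        int i = l1.get(0) + l0.get(1);    // 1 + 2
        System.out.println(i);            // prints 3
    }
}
----
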
[[dynamic-types]]
==== Dynamic Types

A dynamic type value can represent the value of any primitive type or
reference type using a single type name `def`. A `def` type value mimics
the behavior of whatever value it represents at run-time and will always
represent the child-most descendant type value of any type value when evaluated
during operations.

Declare a `def` type <<painless-variables, variable>>, and assign it
any type of value for evaluation during later operations. The default value
for a newly-declared `def` type variable is `null`. A `def` type variable or
method/function parameter can change the type it represents during the
compilation and evaluation of a script.

Using the `def` type can have a slight impact on performance. Use only primitive
types and reference types directly when performance is critical.

*Errors*

* If a `def` type value represents an inappropriate type for evaluation of an
operation at run-time.

*Examples*

* General uses of the `def` type.
+
[source,Painless]
----
<1> def dp = 1;
<2> def dr = new ArrayList();
<3> dr = dp;
----
+
<1> declare `def dp`;
    implicit cast `int 1` to `def` -> `def`;
    assign `def` to `dp`
<2> declare `def dr`;
    allocate `ArrayList` instance -> `ArrayList reference`;
    implicit cast `ArrayList reference` to `def` -> `def`;
    assign `def` to `dr`
<3> access `dp` -> `def`;
    assign `def` to `dr`;
    (note the switch in the type `dr` represents from `ArrayList` to `int`)
+
* A `def` type value representing the child-most descendant of a value.
+
[source,Painless]
----
<1> Object l = new ArrayList();
<2> def d = l;
<3> d.ensureCapacity(10);
----
+
<1> declare `Object l`;
    allocate `ArrayList` instance -> `ArrayList reference`;
    implicit cast `ArrayList reference` to `Object reference`
    -> `Object reference`;
    assign `Object reference` to `l`
<2> declare `def d`;
    access `l` -> `Object reference`;
    implicit cast `Object reference` to `def` -> `def`;
    assign `def` to `d`
<3> access `d` -> `def`;
    implicit cast `def` to `ArrayList reference` -> `ArrayList reference`;
    call `ensureCapacity` on `ArrayList reference` with arguments (`int 10`);
    (note `def` was implicit cast to `ArrayList reference`
    since `ArrayList` is the child-most descendant type value that the
    `def` type value represents)

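Plain Java has no `def`, but the run-time behavior can be approximated with an
`Object` variable and an explicit cast to the child-most descendant type;
Painless performs that cast implicitly. An illustrative sketch:

[source,Java]
----
// Illustrative Java approximation of the def examples above. In Painless,
// d.ensureCapacity(10) compiles directly and the cast happens at run-time.
import java.util.ArrayList;

public class DefSketch {
    public static void main(String[] args) {
        Object d = new ArrayList<Integer>();         // like: def d = new ArrayList();
        ((ArrayList<Integer>) d).ensureCapacity(10); // explicit cast required in Java

        d = 1;                                       // the represented type can change
        System.out.println(d.getClass().getSimpleName()); // Integer
    }
}
----
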
[[string-type]]
==== String Type

The `String` type is a specialized reference type that is immutable and does
not require explicit allocation. Use a <<strings, string literal>>, enclosed in
single or double quotes, to directly evaluate a `String` type value. While not
required, the <<constructor-call, new instance operator>> can allocate `String`
type instances. The default value for a newly-declared `String` variable is
`null`.

*Examples*

* General use of the `String` type.
+
[source,Painless]
----
<1> String r = "some text";
<2> String s = 'some text';
<3> String t = new String("some text");
<4> String u;
----
+
<1> declare `String r`;
    assign `String "some text"` to `r`
<2> declare `String s`;
    assign `String 'some text'` to `s`
<3> declare `String t`;
    allocate `String` instance with arguments (`String "some text"`)
    -> `String "some text"`;
    assign `String "some text"` to `t`
<4> declare `String u`;
    assign default `null` to `u`

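Apart from the single-quoted literal form, Painless strings behave like Java
strings; an illustrative Java sketch of the allocation variants:

[source,Java]
----
// Illustrative Java sketch; Painless additionally accepts 'some text'
// (single quotes), which plain Java does not.
public class StringSketch {
    public static void main(String[] args) {
        String r = "some text";              // literal: no explicit allocation
        String t = new String("some text");  // explicit allocation also allowed
        String u = null;                     // the default value for a String

        System.out.println(r.equals(t));     // true: same character contents
        System.out.println(u == null);       // true
    }
}
----
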
[[void-type]]
==== void Type

The `void` type represents the concept of a lack of type. Use the `void` type
to indicate a function returns no value.

*Examples*

* Use of the `void` type in a function.
+
[source,Painless]
----
void addToList(List l, def d) {
    l.add(d);
}
----

[[array-type]]
==== Array Type

An array type is a specialized reference type where an array type instance
represents a series of values allocated to the heap. All values in an array
type instance are of the same type. Each value is assigned an index from within
the range `[0, length)` where length is the total number of values allocated
for the array type instance.

Use the <<new-array, new array operator>> or the
<<array-initialization, array initialization operator>> to allocate an array
type instance. For example, `new int[] {1, 2, 3}` allocates a one-dimensional
`int` array with a size of 3 and the values `1`, `2`, and `3`. Declare an array
type <<painless-variables, variable>>, and assign it an array type value for
evaluation during later operations. The default value for a newly-declared
array type variable is `null`. An array type value is shallow-copied during an
assignment or as an argument for a method/function call. Assign `null` to an
array type variable to indicate the array type value refers to no array type
instance. The JVM will garbage collect an array type instance when it is no
longer referred to by any array type values. Pass `null` as an argument to a
method/function call to indicate the argument refers to no array type instance.

Use the <<array-length, array length operator>> to retrieve the length of an
array type value as an int type value. Use the
<<array-access, array access operator>> to load from and store to individual
values within an array type value.

When an array type instance is allocated with multiple dimensions using the
range `[2, d]` where `d >= 2`, each dimension in the range `[1, d-1]` is also
an array type. The array type of each dimension, `n`, is an array type with the
number of dimensions equal to `d-n`. For example, consider `int[][][]` with 3
dimensions. The 3rd dimension, `d-3`, is the primitive type `int`. The 2nd
dimension, `d-2`, is the array type `int[]`. And the 1st dimension, `d-1`, is
the array type `int[][]`.

*Examples*

* General use of single-dimensional arrays.
+
[source,Painless]
----
<1> int[] x;
<2> float[] y = new float[10];
<3> def z = new float[5];
<4> y[9] = 1.0F;
<5> z[0] = y[9];
----
+
<1> declare `int[] x`;
    assign default `null` to `x`
<2> declare `float[] y`;
    allocate `1-d float array` instance with `length [10]`
    -> `1-d float array reference`;
    assign `1-d float array reference` to `y`
<3> declare `def z`;
    allocate `1-d float array` instance with `length [5]`
    -> `1-d float array reference`;
    implicit cast `1-d float array reference` to `def` -> `def`;
    assign `def` to `z`
<4> access `y` -> `1-d float array reference`;
    assign `float 1.0` to `index [9]` of `1-d float array reference`
<5> access `y` -> `1-d float array reference @0`;
    access `index [9]` of `1-d float array reference @0` -> `float 1.0`;
    access `z` -> `def`;
    implicit cast `def` to `1-d float array reference @1`
    -> `1-d float array reference @1`;
    assign `float 1.0` to `index [0]` of `1-d float array reference @1`
+
* Use of a multi-dimensional array.
+
[source,Painless]
----
<1> int[][][] ia3 = new int[2][3][4];
<2> ia3[1][2][3] = 99;
<3> int i = ia3[1][2][3];
----
+
<1> declare `int[][][] ia3`;
    allocate `3-d int array` instance with length `[2, 3, 4]`
    -> `3-d int array reference`;
    assign `3-d int array reference` to `ia3`
<2> access `ia3` -> `3-d int array reference`;
    assign `int 99` to `index [1, 2, 3]` of `3-d int array reference`
<3> declare `int i`;
    access `ia3` -> `3-d int array reference`;
    access `index [1, 2, 3]` of `3-d int array reference` -> `int 99`;
    assign `int 99` to `i`

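The dimension rule above (each outer dimension of a multi-dimensional array is
itself an array type) is directly observable in Java; an illustrative sketch:

[source,Java]
----
// Illustrative Java sketch: for int[][][], the 1st dimension holds int[][]
// values, the 2nd holds int[] values, and the 3rd holds primitive ints.
public class ArrayDimensions {
    public static void main(String[] args) {
        int[][][] ia3 = new int[2][3][4];
        int[][] d1 = ia3[0];      // array type with d-1 = 2 dimensions
        int[] d2 = ia3[0][0];     // array type with d-2 = 1 dimension
        int d3 = ia3[0][0][0];    // the element type itself

        ia3[1][2][3] = 99;
        System.out.println(ia3[1][2][3] + " " + d1.length + " " + d2.length + " " + d3);
        // prints: 99 3 4 0
    }
}
----
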
@ -1,29 +1,31 @@
[[painless-variables]]
=== Variables

A variable loads and stores a value for evaluation during
<<painless-operators, operations>>.

[[declaration]]
==== Declaration

Declare a variable before use with the format of <<painless-types, type>>
followed by <<painless-identifiers, identifier>>. Declare an
<<array-type, array type>> variable using an opening `[` token and a closing
`]` token for each dimension directly after the identifier. Specify a
comma-separated list of identifiers following the type to declare multiple
variables in a single statement. Use an <<assignment, assignment operator>>
combined with a declaration to immediately assign a value to a variable.
A variable not immediately assigned a value will have a default value assigned
implicitly based on the type.

*Errors*

* If a variable is used prior to or without declaration.

*Grammar*
[source,ANTLR4]
----
declaration : type ID assignment? (',' ID assignment?)*;
type: ID ('.' ID)* ('[' ']')*;
assignment: '=' expression;
----

@ -35,27 +37,43 @@ assignment: ID '=' expression
----
<1> int x;
<2> List y;
<3> int x, y = 5, z;
<4> def d;
<5> int i = 10;
<6> float[] f;
<7> Map[][] m;
----
+
<1> declare `int x`;
    assign default `int 0` to `x`
<2> declare `List y`;
    assign default `null` to `y`
<3> declare `int x`;
    assign default `int 0` to `x`;
    declare `int y`;
    assign `int 5` to `y`;
    declare `int z`;
    assign default `int 0` to `z`
<4> declare `def d`;
    assign default `null` to `d`
<5> declare `int i`;
    assign `int 10` to `i`
<6> declare `float[] f`;
    assign default `null` to `f`
<7> declare `Map[][] m`;
    assign default `null` to `m`

[[assignment]]
==== Assignment

Use the *assignment operator* to store a value in a variable. Any operation
that produces a value can be assigned to any variable as long as the
<<painless-types, types>> are the same or the resultant type can be
<<painless-casting, implicitly cast>> to the variable type.

*Errors*

* If the type of value is unable to match the type of variable.

*Grammar*
[source,ANTLR4]
----
assignment: ID '=' expression;
----

@ -65,7 +83,7 @@ assignment: ID '=' expression

*Examples*

* Variable assignment with an integer literal.
+
[source,Painless]
----
@ -73,10 +91,11 @@ assignment: ID '=' expression
<1> int i;
<2> i = 10;
----
+
<1> declare `int i`;
    assign default `int 0` to `i`
<2> assign `int 10` to `i`
+
* Declaration combined with immediate assignment.
+
[source,Painless]
----
@ -84,11 +103,12 @@ assignment: ID '=' expression
<1> int i = 10;
<2> double j = 2.0;
----
+
<1> declare `int i`;
    assign `int 10` to `i`
<2> declare `double j`;
    assign `double 2.0` to `j`
+
* Assignment of one variable to another using primitive types.
+
[source,Painless]
----
@ -96,11 +116,13 @@ assignment: ID '=' expression
<1> int i = 10;
<2> int j = i;
----
+
<1> declare `int i`;
    assign `int 10` to `i`
<2> declare `int j`;
    access `i` -> `int 10`;
    assign `int 10` to `j`
+
* Assignment with reference types using the *new instance operator*.
+
[source,Painless]
----
@ -108,12 +130,15 @@ assignment: ID '=' expression
<1> ArrayList l = new ArrayList();
<2> Map m = new HashMap();
----
+
<1> declare `ArrayList l`;
    allocate `ArrayList` instance -> `ArrayList reference`;
    assign `ArrayList reference` to `l`
<2> declare `Map m`;
    allocate `HashMap` instance -> `HashMap reference`;
    implicit cast `HashMap reference` to `Map reference` -> `Map reference`;
    assign `Map reference` to `m`
+
* Assignment of one variable to another using reference types.
+
[source,Painless]
----
@ -123,8 +148,52 @@ assignment: ID '=' expression
<1> List l = new ArrayList();
<2> List k = l;
<3> List m;
<4> m = k;
----
+
<1> declare `List l`;
    allocate `ArrayList` instance -> `ArrayList reference`;
    implicit cast `ArrayList reference` to `List reference` -> `List reference`;
    assign `List reference` to `l`
<2> declare `List k`;
    access `l` -> `List reference`;
    assign `List reference` to `k`;
    (note `l` and `k` refer to the same instance known as a shallow-copy)
<3> declare `List m`;
    assign default `null` to `m`
<4> access `k` -> `List reference`;
    assign `List reference` to `m`;
    (note `l`, `k`, and `m` refer to the same instance)
+
* Assignment with an array type variable using the *new array operator*.
+
[source,Painless]
----
<1> int[] ia1;
<2> ia1 = new int[2];
<3> ia1[0] = 1;
<4> int[] ib1 = ia1;
<5> int[][] ic2 = new int[2][5];
<6> ic2[1][3] = 2;
<7> ic2[0] = ia1;
----
+
<1> declare `int[] ia1`;
    assign default `null` to `ia1`
<2> allocate `1-d int array` instance with `length [2]`
    -> `1-d int array reference`;
    assign `1-d int array reference` to `ia1`
<3> access `ia1` -> `1-d int array reference`;
    assign `int 1` to `index [0]` of `1-d int array reference`
<4> declare `int[] ib1`;
    access `ia1` -> `1-d int array reference`;
    assign `1-d int array reference` to `ib1`;
    (note `ia1` and `ib1` refer to the same instance known as a shallow copy)
<5> declare `int[][] ic2`;
    allocate `2-d int array` instance with `length [2, 5]`
    -> `2-d int array reference`;
    assign `2-d int array reference` to `ic2`
<6> access `ic2` -> `2-d int array reference`;
    assign `int 2` to `index [1, 3]` of `2-d int array reference`
<7> access `ia1` -> `1-d int array reference`;
    access `ic2` -> `2-d int array reference`;
    assign `1-d int array reference` to
    `index [0]` of `2-d int array reference`;
    (note `ia1`, `ib1`, and `index [0]` of `ic2` refer to the same instance)

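As with reference types, the aliasing in the final array example is plain JVM
behavior; an illustrative Java sketch of `ia1`, `ib1`, and `ic2[0]` sharing one
instance:

[source,Java]
----
// Illustrative Java sketch of the array assignment example above.
public class ArrayAliasing {
    public static void main(String[] args) {
        int[] ia1 = new int[2];
        ia1[0] = 1;
        int[] ib1 = ia1;              // shallow copy: same 1-d instance
        int[][] ic2 = new int[2][5];
        ic2[0] = ia1;                 // row 0 now aliases ia1 as well

        ib1[0] = 7;                   // visible through every alias
        System.out.println(ia1[0] + " " + ic2[0][0]);  // prints: 7 7
    }
}
----
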
@ -29,6 +29,14 @@
[[remove-http-enabled]]
==== Http enabled setting removed

* The setting `http.enabled` previously allowed disabling binding to HTTP, only allowing
use of the transport client. This setting has been removed, as the transport client
will be removed in the future, thus requiring HTTP to always be enabled.

[[remove-http-pipelining-setting]]
==== Http pipelining setting removed

* The setting `http.pipelining` previously allowed disabling HTTP pipelining support.
This setting has been removed, as disabling http pipelining support on the server
provided little value. The setting `http.pipelining.max_events` can still be used to
limit the number of pipelined requests in-flight.

@ -96,8 +96,6 @@ and stack traces in response output. Note: When set to `false` and the `error_tr
parameter is specified, an error will be returned; when `error_trace` is not specified, a
simple message will be returned. Defaults to `true`

|`http.pipelining.max_events` |The maximum number of events to be queued up in memory before an HTTP connection is closed, defaults to `10000`.

|`http.max_warning_header_count` |The maximum number of warning headers in

@ -93,6 +93,7 @@ to 'shapes'.
* `type` - Index type where the pre-indexed shape is.
* `path` - The field specified as path containing the pre-indexed shape.
Defaults to 'shape'.
* `routing` - The routing of the shape document if required.

The following is an example of using the Filter with a pre-indexed
shape:

@ -42,7 +42,6 @@ import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.http.HttpHandlingSettings;
import org.elasticsearch.http.netty4.cors.Netty4CorsHandler;
import org.elasticsearch.rest.AbstractRestChannel;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;

@ -59,29 +58,24 @@ final class Netty4HttpChannel extends AbstractRestChannel {
    private final Netty4HttpServerTransport transport;
    private final Channel channel;
    private final FullHttpRequest nettyRequest;
    private final int sequence;
    private final ThreadContext threadContext;
    private final HttpHandlingSettings handlingSettings;

    /**
     * @param transport        The corresponding <code>NettyHttpServerTransport</code> where this channel belongs to.
     * @param request          The request that is handled by this channel.
     * @param sequence         The pipelining sequence number for this request
     * @param handlingSettings true if error messages should include stack traces.
     * @param threadContext    the thread context for the channel
     */
    Netty4HttpChannel(Netty4HttpServerTransport transport, Netty4HttpRequest request, int sequence, HttpHandlingSettings handlingSettings,
                      ThreadContext threadContext) {
        super(request, handlingSettings.getDetailedErrorsEnabled());
        this.transport = transport;
        this.channel = request.getChannel();
        this.nettyRequest = request.request();
        this.sequence = sequence;
        this.threadContext = threadContext;
        this.handlingSettings = handlingSettings;
    }

@ -129,7 +123,7 @@ final class Netty4HttpChannel extends AbstractRestChannel {
            final ChannelPromise promise = channel.newPromise();

            if (releaseContent) {
                promise.addListener(f -> ((Releasable) content).close());
            }

            if (releaseBytesStreamOutput) {
@ -140,13 +134,9 @@ final class Netty4HttpChannel extends AbstractRestChannel {
                promise.addListener(ChannelFutureListener.CLOSE);
            }

            Netty4HttpResponse newResponse = new Netty4HttpResponse(sequence, resp);

            channel.writeAndFlush(newResponse, promise);
            releaseContent = false;
            releaseBytesStreamOutput = false;
        } finally {
@ -156,9 +146,6 @@ final class Netty4HttpChannel extends AbstractRestChannel {
            if (releaseBytesStreamOutput) {
                bytesOutputOrNull().close();
            }
        }
    }

@ -0,0 +1,102 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty4;

import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http.LastHttpContent;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.http.HttpPipelinedRequest;
import org.elasticsearch.http.HttpPipeliningAggregator;
import org.elasticsearch.transport.netty4.Netty4Utils;

import java.nio.channels.ClosedChannelException;
import java.util.Collections;
import java.util.List;

/**
 * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests.
 */
public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {

    private final Logger logger;
    private final HttpPipeliningAggregator<Netty4HttpResponse, ChannelPromise> aggregator;

    /**
     * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation.
     *
     * @param logger        for logging unexpected errors
     * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is
     *                      required as events cannot queue up indefinitely
     */
    public Netty4HttpPipeliningHandler(Logger logger, final int maxEventsHeld) {
        this.logger = logger;
        this.aggregator = new HttpPipeliningAggregator<>(maxEventsHeld);
    }

    @Override
    public void channelRead(final ChannelHandlerContext ctx, final Object msg) {
        if (msg instanceof LastHttpContent) {
            HttpPipelinedRequest<LastHttpContent> pipelinedRequest = aggregator.read(((LastHttpContent) msg));
            ctx.fireChannelRead(pipelinedRequest);
        } else {
            ctx.fireChannelRead(msg);
        }
    }

    @Override
    public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) {
        assert msg instanceof Netty4HttpResponse : "Message must be type: " + Netty4HttpResponse.class;
        Netty4HttpResponse response = (Netty4HttpResponse) msg;
        boolean success = false;
        try {
            List<Tuple<Netty4HttpResponse, ChannelPromise>> readyResponses = aggregator.write(response, promise);
            for (Tuple<Netty4HttpResponse, ChannelPromise> readyResponse : readyResponses) {
                ctx.write(readyResponse.v1().getResponse(), readyResponse.v2());
            }
            success = true;
        } catch (IllegalStateException e) {
            ctx.channel().close();
        } finally {
            if (success == false) {
                promise.setFailure(new ClosedChannelException());
            }
        }
    }

    @Override
    public void close(ChannelHandlerContext ctx, ChannelPromise promise) {
        List<Tuple<Netty4HttpResponse, ChannelPromise>> inflightResponses = aggregator.removeAllInflightResponses();

        if (inflightResponses.isEmpty() == false) {
            ClosedChannelException closedChannelException = new ClosedChannelException();
            for (Tuple<Netty4HttpResponse, ChannelPromise> inflightResponse : inflightResponses) {
                try {
                    inflightResponse.v2().setFailure(closedChannelException);
                } catch (RuntimeException e) {
                    logger.error("unexpected error while releasing pipelined http responses", e);
                }
            }
        }
        ctx.close(promise);
    }
}

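The ordering contract enforced by this handler can be sketched independently of
Netty: each response carries the sequence number of its request, and a response
is flushed only once every lower sequence has been written. A minimal
illustrative sketch with hypothetical names (the real logic lives in
HttpPipeliningAggregator, which is not shown in this diff):

[source,Java]
----
// Minimal, self-contained sketch of sequence-ordered response writing.
// Names are hypothetical; this is not the Elasticsearch implementation.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class OrderingSketch {

    static final class Response {
        final int sequence;
        final String body;
        Response(int sequence, String body) {
            this.sequence = sequence;
            this.body = body;
        }
        @Override
        public String toString() {
            return sequence + ":" + body;
        }
    }

    private final PriorityQueue<Response> holding =
        new PriorityQueue<>(Comparator.comparingInt((Response r) -> r.sequence));
    private int writeSequence = 0;

    // Hold the response until all lower sequences have been written; return
    // everything that is now ready, in order.
    List<Response> write(Response response) {
        holding.add(response);
        List<Response> ready = new ArrayList<>();
        while (holding.isEmpty() == false && holding.peek().sequence == writeSequence) {
            ready.add(holding.poll());
            writeSequence++;
        }
        return ready;
    }

    public static void main(String[] args) {
        OrderingSketch s = new OrderingSketch();
        System.out.println(s.write(new Response(1, "second"))); // [] - held back
        System.out.println(s.write(new Response(0, "first")));  // [0:first, 1:second]
    }
}
----
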
@ -30,41 +30,30 @@ import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpHeaders;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.http.HttpHandlingSettings;
import org.elasticsearch.http.HttpPipelinedRequest;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.transport.netty4.Netty4Utils;

import java.util.Collections;

@ChannelHandler.Sharable
class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> {

    private final Netty4HttpServerTransport serverTransport;
    private final HttpHandlingSettings handlingSettings;
    private final ThreadContext threadContext;

    Netty4HttpRequestHandler(Netty4HttpServerTransport serverTransport, HttpHandlingSettings handlingSettings,
                             ThreadContext threadContext) {
        this.serverTransport = serverTransport;
        this.handlingSettings = handlingSettings;
        this.threadContext = threadContext;
    }

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) throws Exception {
        final FullHttpRequest request = msg.getRequest();

        boolean success = false;
        try {

            final FullHttpRequest copy =
@ -111,7 +100,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<Object> {
            Netty4HttpChannel innerChannel;
            try {
                innerChannel =
                    new Netty4HttpChannel(serverTransport, httpRequest, msg.getSequence(), handlingSettings, threadContext);
            } catch (final IllegalArgumentException e) {
                if (badRequestCause == null) {
                    badRequestCause = e;
@ -126,7 +115,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<Object> {
                        copy,
                        ctx.channel());
                innerChannel =
                    new Netty4HttpChannel(serverTransport, innerRequest, msg.getSequence(), handlingSettings, threadContext);
            }
            channel = innerChannel;
        }
@ -138,12 +127,9 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<Object> {
            } else {
                serverTransport.dispatchRequest(httpRequest, channel);
            }
            success = true;
        } finally {
            // As we have copied the buffer, we can release the request
            request.release();
        }
    }

@ -0,0 +1,37 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty4;

import io.netty.handler.codec.http.FullHttpResponse;
import org.elasticsearch.http.HttpPipelinedMessage;

public class Netty4HttpResponse extends HttpPipelinedMessage {

    private final FullHttpResponse response;

    public Netty4HttpResponse(int sequence, FullHttpResponse response) {
        super(sequence);
        this.response = response;
    }

    public FullHttpResponse getResponse() {
        return response;
    }
}

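The base class is not part of this diff; a hedged sketch of the contract
Netty4HttpResponse appears to rely on, assuming HttpPipelinedMessage simply
stores the sequence number and orders messages by it:

[source,Java]
----
// Assumed shape of org.elasticsearch.http.HttpPipelinedMessage (not shown in
// this diff); illustrative only.
public class HttpPipelinedMessageSketch implements Comparable<HttpPipelinedMessageSketch> {

    private final int sequence;

    public HttpPipelinedMessageSketch(int sequence) {
        this.sequence = sequence;
    }

    public int getSequence() {
        return sequence;
    }

    @Override
    public int compareTo(HttpPipelinedMessageSketch other) {
        return Integer.compare(sequence, other.sequence);
    }
}
----
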
@ -62,7 +62,6 @@ import org.elasticsearch.http.HttpStats;
import org.elasticsearch.http.netty4.cors.Netty4CorsConfig;
import org.elasticsearch.http.netty4.cors.Netty4CorsConfigBuilder;
import org.elasticsearch.http.netty4.cors.Netty4CorsHandler;
import org.elasticsearch.rest.RestUtils;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.netty4.Netty4OpenChannelsHandler;

@ -99,7 +98,6 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_D
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;
import static org.elasticsearch.http.netty4.cors.Netty4CorsHandler.ANY_ORIGIN;

@ -162,8 +160,6 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {

    protected final int workerCount;

    protected final int pipeliningMaxEvents;

    /**
@ -204,6 +200,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
        this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
        this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
        this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
        this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
        this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()),
            Math.toIntExact(maxChunkSize.getBytes()),
            Math.toIntExact(maxHeaderSize.getBytes()),
@ -211,7 +208,8 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
            SETTING_HTTP_RESET_COOKIES.get(settings),
            SETTING_HTTP_COMPRESSION.get(settings),
            SETTING_HTTP_COMPRESSION_LEVEL.get(settings),
            SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings),
            pipeliningMaxEvents);

        this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings);
        this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings);
@ -226,14 +224,12 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
        ByteSizeValue receivePredictor = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings);
        recvByteBufAllocator = new FixedRecvByteBufAllocator(receivePredictor.bytesAsInt());

        this.corsConfig = buildCorsConfig(settings);

        logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " +
                "receive_predictor[{}], max_composite_buffer_components[{}], pipelining_max_events[{}]",
            maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength, receivePredictor, maxCompositeBufferComponents,
            pipeliningMaxEvents);
    }

    public Settings settings() {
@ -452,9 +448,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
            if (SETTING_CORS_ENABLED.get(transport.settings())) {
                ch.pipeline().addLast("cors", new Netty4CorsHandler(transport.getCorsConfig()));
            }
            ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(transport.logger, transport.pipeliningMaxEvents));
            ch.pipeline().addLast("handler", requestHandler);
        }

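With the setting gone, the pipelining handler is installed unconditionally
between HTTP aggregation and request handling. A hedged, simplified Netty
sketch of the resulting pipeline order (handler construction is elided; names
follow the diff above):

[source,Java]
----
// Simplified sketch of the channel pipeline order after this change; the
// real initializer configures many more options and handlers.
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpServerCodec;

public class PipelineOrderSketch extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        ch.pipeline().addLast("codec", new HttpServerCodec());
        ch.pipeline().addLast("aggregator", new HttpObjectAggregator(1024 * 1024));
        // pipelining is now always added, no longer guarded by http.pipelining:
        //   ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(logger, maxEvents));
        //   ch.pipeline().addLast("handler", requestHandler);
    }
}
----
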
@ -1,88 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty4.pipelining;

import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.util.ReferenceCounted;

/**
 * Permits downstream channel events to be ordered and signalled as to whether more are to come for
 * a given sequence.
 */
public class HttpPipelinedRequest implements ReferenceCounted {

    private final LastHttpContent last;
    private final int sequence;

    public HttpPipelinedRequest(final LastHttpContent last, final int sequence) {
        this.last = last;
        this.sequence = sequence;
    }

    public LastHttpContent last() {
        return last;
    }

    public HttpPipelinedResponse createHttpResponse(final FullHttpResponse response, final ChannelPromise promise) {
        return new HttpPipelinedResponse(response, promise, sequence);
    }

    @Override
    public int refCnt() {
        return last.refCnt();
    }

    @Override
    public ReferenceCounted retain() {
        last.retain();
        return this;
    }

    @Override
    public ReferenceCounted retain(int increment) {
        last.retain(increment);
        return this;
    }

    @Override
    public ReferenceCounted touch() {
        last.touch();
        return this;
    }

    @Override
    public ReferenceCounted touch(Object hint) {
        last.touch(hint);
        return this;
    }

    @Override
    public boolean release() {
        return last.release();
    }

    @Override
    public boolean release(int decrement) {
        return last.release(decrement);
    }

}

@ -1,94 +0,0 @@
package org.elasticsearch.http.netty4.pipelining;

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.util.ReferenceCounted;

class HttpPipelinedResponse implements Comparable<HttpPipelinedResponse>, ReferenceCounted {

    private final FullHttpResponse response;
    private final ChannelPromise promise;
    private final int sequence;

    HttpPipelinedResponse(FullHttpResponse response, ChannelPromise promise, int sequence) {
        this.response = response;
        this.promise = promise;
        this.sequence = sequence;
    }

    public FullHttpResponse response() {
        return response;
    }

    public ChannelPromise promise() {
        return promise;
    }

    public int sequence() {
        return sequence;
    }

    @Override
    public int compareTo(HttpPipelinedResponse o) {
        return Integer.compare(sequence, o.sequence);
    }

    @Override
    public int refCnt() {
        return response.refCnt();
    }

    @Override
    public ReferenceCounted retain() {
        response.retain();
        return this;
    }

    @Override
    public ReferenceCounted retain(int increment) {
        response.retain(increment);
        return this;
    }

    @Override
    public ReferenceCounted touch() {
        response.touch();
        return this;
    }

    @Override
    public ReferenceCounted touch(Object hint) {
        response.touch(hint);
        return this;
    }

    @Override
    public boolean release() {
        return response.release();
    }

    @Override
    public boolean release(int decrement) {
        return response.release(decrement);
    }

}

@@ -1,144 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.http.netty4.pipelining;
-
-import io.netty.channel.ChannelDuplexHandler;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelPromise;
-import io.netty.handler.codec.http.LastHttpContent;
-import org.apache.logging.log4j.Logger;
-import org.elasticsearch.transport.netty4.Netty4Utils;
-
-import java.nio.channels.ClosedChannelException;
-import java.util.Collections;
-import java.util.PriorityQueue;
-
-/**
- * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests.
- */
-public class HttpPipeliningHandler extends ChannelDuplexHandler {
-
-    // we use a priority queue so that responses are ordered by their sequence number
-    private final PriorityQueue<HttpPipelinedResponse> holdingQueue;
-
-    private final Logger logger;
-    private final int maxEventsHeld;
-
-    /*
-     * The current read and write sequence numbers. Read sequence numbers are attached to requests in the order they are read from the
-     * channel, and then transferred to responses. A response is not written to the channel context until its sequence number matches the
-     * current write sequence, implying that all preceding messages have been written.
-     */
-    private int readSequence;
-    private int writeSequence;
-
-    /**
-     * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation.
-     *
-     * @param logger        for logging unexpected errors
-     * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is
-     *                      required as events cannot queue up indefinitely
-     */
-    public HttpPipeliningHandler(Logger logger, final int maxEventsHeld) {
-        this.logger = logger;
-        this.maxEventsHeld = maxEventsHeld;
-        this.holdingQueue = new PriorityQueue<>(1);
-    }
-
-    @Override
-    public void channelRead(final ChannelHandlerContext ctx, final Object msg) throws Exception {
-        if (msg instanceof LastHttpContent) {
-            ctx.fireChannelRead(new HttpPipelinedRequest(((LastHttpContent) msg).retain(), readSequence++));
-        } else {
-            ctx.fireChannelRead(msg);
-        }
-    }
-
-    @Override
-    public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) throws Exception {
-        if (msg instanceof HttpPipelinedResponse) {
-            final HttpPipelinedResponse current = (HttpPipelinedResponse) msg;
-            /*
-             * We attach the promise to the response. When we invoke a write on the channel with the response, we must ensure that we invoke
-             * the write methods that accept the same promise that we have attached to the response otherwise as the response proceeds
-             * through the handler pipeline a different promise will be used until reaching this handler. Therefore, we assert here that the
-             * attached promise is identical to the provided promise as a safety mechanism that we are respecting this.
-             */
-            assert current.promise() == promise;
-
-            boolean channelShouldClose = false;
-
-            synchronized (holdingQueue) {
-                if (holdingQueue.size() < maxEventsHeld) {
-                    holdingQueue.add(current);
-
-                    while (!holdingQueue.isEmpty()) {
-                        /*
-                         * Since the response with the lowest sequence number is the top of the priority queue, we know if its sequence
-                         * number does not match the current write sequence number then we have not processed all preceding responses yet.
-                         */
-                        final HttpPipelinedResponse top = holdingQueue.peek();
-                        if (top.sequence() != writeSequence) {
-                            break;
-                        }
-                        holdingQueue.remove();
-                        /*
-                         * We must use the promise attached to the response; this is necessary since we are going to hold a response until all
-                         * responses that precede it in the pipeline are written first. Note that the promise from the method invocation is
-                         * not ignored, it will already be attached to an existing response and consumed when that response is drained.
-                         */
-                        ctx.write(top.response(), top.promise());
-                        writeSequence++;
-                    }
-                } else {
-                    channelShouldClose = true;
-                }
-            }
-
-            if (channelShouldClose) {
-                try {
-                    Netty4Utils.closeChannels(Collections.singletonList(ctx.channel()));
-                } finally {
-                    current.release();
-                    promise.setSuccess();
-                }
-            }
-        } else {
-            ctx.write(msg, promise);
-        }
-    }
-
-    @Override
-    public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
-        if (holdingQueue.isEmpty() == false) {
-            ClosedChannelException closedChannelException = new ClosedChannelException();
-            HttpPipelinedResponse pipelinedResponse;
-            while ((pipelinedResponse = holdingQueue.poll()) != null) {
-                try {
-                    pipelinedResponse.release();
-                    pipelinedResponse.promise().setFailure(closedChannelException);
-                } catch (Exception e) {
-                    logger.error("unexpected error while releasing pipelined http responses", e);
-                }
-            }
-        }
-        ctx.close(promise);
-    }
-}
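
The heart of the handler above is the hold-and-drain loop: responses that complete out of order wait in the priority queue until the response carrying the current write sequence arrives, at which point every contiguous response is flushed. The same invariant in a standalone sketch, stripped of Netty (the class and method names are hypothetical):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    // Standalone sketch: responses may arrive in any order, but are emitted
    // strictly by ascending sequence number.
    public class SequenceReorderer {

        static final class Entry {
            final int sequence;
            final String payload;

            Entry(int sequence, String payload) {
                this.sequence = sequence;
                this.payload = payload;
            }
        }

        private final PriorityQueue<Entry> holdingQueue =
            new PriorityQueue<>(Comparator.comparingInt((Entry e) -> e.sequence));
        private int writeSequence;

        // Returns every payload that became writable once this entry was accepted.
        List<String> write(Entry entry) {
            List<String> ready = new ArrayList<>();
            holdingQueue.add(entry);
            // Drain while the lowest held sequence matches the next expected one.
            while (!holdingQueue.isEmpty() && holdingQueue.peek().sequence == writeSequence) {
                ready.add(holdingQueue.poll().payload);
                writeSequence++;
            }
            return ready;
        }

        public static void main(String[] args) {
            SequenceReorderer reorderer = new SequenceReorderer();
            System.out.println(reorderer.write(new Entry(2, "third")));  // []
            System.out.println(reorderer.write(new Entry(0, "first")));  // [first]
            System.out.println(reorderer.write(new Entry(1, "second"))); // [second, third]
        }
    }
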
@@ -60,7 +60,6 @@ import org.elasticsearch.http.HttpHandlingSettings;
 import org.elasticsearch.http.HttpTransportSettings;
 import org.elasticsearch.http.NullDispatcher;
 import org.elasticsearch.http.netty4.cors.Netty4CorsHandler;
-import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestResponse;
@@ -212,12 +211,12 @@ public class Netty4HttpChannelTests extends ESTestCase {
         final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
         httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote");
         final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel();
-        Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel);
+        final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel);
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;

         // send a response
         Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         TestResponse resp = new TestResponse();
         final String customHeader = "custom-header";
         final String customHeaderValue = "xyz";
@@ -227,7 +226,7 @@ public class Netty4HttpChannelTests extends ESTestCase {
         // inspect what was written
         List<Object> writtenObjects = writeCapturingChannel.getWrittenObjects();
         assertThat(writtenObjects.size(), is(1));
-        HttpResponse response = (HttpResponse) writtenObjects.get(0);
+        HttpResponse response = ((Netty4HttpResponse) writtenObjects.get(0)).getResponse();
         assertThat(response.headers().get("non-existent-header"), nullValue());
         assertThat(response.headers().get(customHeader), equalTo(customHeaderValue));
         assertThat(response.headers().get(HttpHeaderNames.CONTENT_LENGTH), equalTo(Integer.toString(resp.content().length())));
@@ -243,10 +242,9 @@ public class Netty4HttpChannelTests extends ESTestCase {
         final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
         final EmbeddedChannel embeddedChannel = new EmbeddedChannel();
         final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel);
-        final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null;
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
         final Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         final TestResponse response = new TestResponse(bigArrays);
         assertThat(response.content(), instanceOf(Releasable.class));
         embeddedChannel.close();
@@ -263,10 +261,9 @@ public class Netty4HttpChannelTests extends ESTestCase {
         final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
         final EmbeddedChannel embeddedChannel = new EmbeddedChannel();
         final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel);
-        final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null;
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
         final Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR,
             JsonXContent.contentBuilder().startObject().endObject());
         assertThat(response.content(), not(instanceOf(Releasable.class)));
@@ -312,7 +309,7 @@ public class Netty4HttpChannelTests extends ESTestCase {
         assertTrue(embeddedChannel.isOpen());
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
         final Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         final TestResponse resp = new TestResponse();
         channel.sendResponse(resp);
         assertThat(embeddedChannel.isOpen(), equalTo(!close));
@@ -340,13 +337,13 @@ public class Netty4HttpChannelTests extends ESTestCase {
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;

         Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         channel.sendResponse(new TestResponse());

         // get the response
         List<Object> writtenObjects = writeCapturingChannel.getWrittenObjects();
         assertThat(writtenObjects.size(), is(1));
-        return (FullHttpResponse) writtenObjects.get(0);
+        return ((Netty4HttpResponse) writtenObjects.get(0)).getResponse();
     }
 }

@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.http.netty4.pipelining;
+package org.elasticsearch.http.netty4;

 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufUtil;
@@ -37,6 +37,7 @@ import io.netty.handler.codec.http.HttpVersion;
 import io.netty.handler.codec.http.LastHttpContent;
 import io.netty.handler.codec.http.QueryStringDecoder;
 import org.elasticsearch.common.Randomness;
+import org.elasticsearch.http.HttpPipelinedRequest;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.After;

@@ -62,7 +63,8 @@ import static org.hamcrest.core.Is.is;

 public class Netty4HttpPipeliningHandlerTests extends ESTestCase {

-    private final ExecutorService executorService = Executors.newFixedThreadPool(randomIntBetween(4, 8));
+    private final ExecutorService handlerService = Executors.newFixedThreadPool(randomIntBetween(4, 8));
+    private final ExecutorService eventLoopService = Executors.newFixedThreadPool(1);
     private final Map<String, CountDownLatch> waitingRequests = new ConcurrentHashMap<>();
     private final Map<String, CountDownLatch> finishingRequests = new ConcurrentHashMap<>();

@@ -79,15 +81,19 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
     }

     private void shutdownExecutorService() throws InterruptedException {
-        if (!executorService.isShutdown()) {
-            executorService.shutdown();
-            executorService.awaitTermination(10, TimeUnit.SECONDS);
+        if (!handlerService.isShutdown()) {
+            handlerService.shutdown();
+            handlerService.awaitTermination(10, TimeUnit.SECONDS);
+        }
+        if (!eventLoopService.isShutdown()) {
+            eventLoopService.shutdown();
+            eventLoopService.awaitTermination(10, TimeUnit.SECONDS);
         }
     }

     public void testThatPipeliningWorksWithFastSerializedRequests() throws InterruptedException {
         final int numberOfRequests = randomIntBetween(2, 128);
-        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests),
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests),
             new WorkEmulatorHandler());

         for (int i = 0; i < numberOfRequests; i++) {
@@ -114,7 +120,7 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {

     public void testThatPipeliningWorksWhenSlowRequestsInDifferentOrder() throws InterruptedException {
         final int numberOfRequests = randomIntBetween(2, 128);
-        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests),
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests),
             new WorkEmulatorHandler());

         for (int i = 0; i < numberOfRequests; i++) {
@@ -147,7 +153,7 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
         final EmbeddedChannel embeddedChannel =
             new EmbeddedChannel(
                 new AggregateUrisAndHeadersHandler(),
-                new HttpPipeliningHandler(logger, numberOfRequests),
+                new Netty4HttpPipeliningHandler(logger, numberOfRequests),
                 new WorkEmulatorHandler());

         for (int i = 0; i < numberOfRequests; i++) {
@@ -176,7 +182,7 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {

     public void testThatPipeliningClosesConnectionWithTooManyEvents() throws InterruptedException {
         final int numberOfRequests = randomIntBetween(2, 128);
-        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests),
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests),
             new WorkEmulatorHandler());

         for (int i = 0; i < 1 + numberOfRequests + 1; i++) {
@@ -184,7 +190,7 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
         }

         final List<CountDownLatch> latches = new ArrayList<>();
-        final List<Integer> requests = IntStream.range(1, numberOfRequests + 1).mapToObj(r -> r).collect(Collectors.toList());
+        final List<Integer> requests = IntStream.range(1, numberOfRequests + 1).boxed().collect(Collectors.toList());
         Randomness.shuffle(requests);

         for (final Integer request : requests) {
@@ -205,25 +211,26 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
     public void testPipeliningRequestsAreReleased() throws InterruptedException {
         final int numberOfRequests = 10;
         final EmbeddedChannel embeddedChannel =
-            new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests + 1));
+            new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests + 1));

         for (int i = 0; i < numberOfRequests; i++) {
             embeddedChannel.writeInbound(createHttpRequest("/" + i));
         }

-        HttpPipelinedRequest inbound;
-        ArrayList<HttpPipelinedRequest> requests = new ArrayList<>();
+        HttpPipelinedRequest<FullHttpRequest> inbound;
+        ArrayList<HttpPipelinedRequest<FullHttpRequest>> requests = new ArrayList<>();
         while ((inbound = embeddedChannel.readInbound()) != null) {
             requests.add(inbound);
         }

         ArrayList<ChannelPromise> promises = new ArrayList<>();
         for (int i = 1; i < requests.size(); ++i) {
-            final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK);
+            final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK);
             ChannelPromise promise = embeddedChannel.newPromise();
             promises.add(promise);
-            HttpPipelinedResponse response = requests.get(i).createHttpResponse(httpResponse, promise);
-            embeddedChannel.writeAndFlush(response, promise);
+            int sequence = requests.get(i).getSequence();
+            Netty4HttpResponse resp = new Netty4HttpResponse(sequence, httpResponse);
+            embeddedChannel.writeAndFlush(resp, promise);
         }

         for (ChannelPromise promise : promises) {
@@ -260,14 +267,14 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {

     }

-    private class WorkEmulatorHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest> {
+    private class WorkEmulatorHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<LastHttpContent>> {

         @Override
-        protected void channelRead0(final ChannelHandlerContext ctx, final HttpPipelinedRequest pipelinedRequest) throws Exception {
+        protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest<LastHttpContent> pipelinedRequest) {
+            LastHttpContent request = pipelinedRequest.getRequest();
             final QueryStringDecoder decoder;
-            if (pipelinedRequest.last() instanceof FullHttpRequest) {
-                final FullHttpRequest fullHttpRequest = (FullHttpRequest) pipelinedRequest.last();
-                decoder = new QueryStringDecoder(fullHttpRequest.uri());
+            if (request instanceof FullHttpRequest) {
+                decoder = new QueryStringDecoder(((FullHttpRequest) request).uri());
             } else {
                 decoder = new QueryStringDecoder(AggregateUrisAndHeadersHandler.QUEUE_URI.poll());
             }
@@ -282,12 +289,14 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
             final CountDownLatch finishingLatch = new CountDownLatch(1);
             finishingRequests.put(uri, finishingLatch);

-            executorService.submit(() -> {
+            handlerService.submit(() -> {
                 try {
                     waitingLatch.await(1000, TimeUnit.SECONDS);
                     final ChannelPromise promise = ctx.newPromise();
-                    ctx.write(pipelinedRequest.createHttpResponse(httpResponse, promise), promise);
-                    finishingLatch.countDown();
+                    eventLoopService.submit(() -> {
+                        ctx.write(new Netty4HttpResponse(pipelinedRequest.getSequence(), httpResponse), promise);
+                        finishingLatch.countDown();
+                    });
                 } catch (InterruptedException e) {
                     fail(e.toString());
                 }

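
All of the tests above lean on the same technique: EmbeddedChannel runs a handler pipeline entirely in memory, so inbound and outbound messages can be pushed and popped without sockets or event loops. A minimal self-contained sketch of that pattern (the handler here is a made-up stand-in):

    import io.netty.channel.ChannelHandlerContext;
    import io.netty.channel.ChannelInboundHandlerAdapter;
    import io.netty.channel.embedded.EmbeddedChannel;

    // Minimal sketch: EmbeddedChannel drives a handler entirely in memory,
    // which is why the pipelining tests above need no real I/O.
    public class EmbeddedChannelDemo {

        static class UpperCaseHandler extends ChannelInboundHandlerAdapter {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) {
                ctx.fireChannelRead(((String) msg).toUpperCase());
            }
        }

        public static void main(String[] args) {
            EmbeddedChannel channel = new EmbeddedChannel(new UpperCaseHandler());
            channel.writeInbound("request");            // push a message through the pipeline
            String transformed = channel.readInbound(); // pop whatever reached the tail
            System.out.println(transformed);            // REQUEST
            channel.finish();                           // releases any remaining messages
        }
    }
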
@@ -38,9 +38,9 @@ import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.util.MockBigArrays;
 import org.elasticsearch.common.util.MockPageCacheRecycler;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.http.HttpPipelinedRequest;
 import org.elasticsearch.http.HttpServerTransport;
 import org.elasticsearch.http.NullDispatcher;
-import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.TestThreadPool;
@@ -52,16 +52,11 @@ import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;

 import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
-import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.Matchers.contains;
-import static org.hamcrest.Matchers.hasSize;

 /**
  * This test just checks whether HTTP pipelining works in general, without any connection to the Elasticsearch handler
@@ -85,9 +80,8 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
         }
     }

-    public void testThatHttpPipeliningWorksWhenEnabled() throws Exception {
+    public void testThatHttpPipeliningWorks() throws Exception {
         final Settings settings = Settings.builder()
-            .put("http.pipelining", true)
             .put("http.port", "0")
             .build();
         try (HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) {
@@ -112,48 +106,6 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
         }
     }

-    public void testThatHttpPipeliningCanBeDisabled() throws Exception {
-        final Settings settings = Settings.builder()
-            .put("http.pipelining", false)
-            .put("http.port", "0")
-            .build();
-        try (HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) {
-            httpServerTransport.start();
-            final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses());
-
-            final int numberOfRequests = randomIntBetween(4, 16);
-            final Set<Integer> slowIds = new HashSet<>();
-            final List<String> requests = new ArrayList<>(numberOfRequests);
-            for (int i = 0; i < numberOfRequests; i++) {
-                if (rarely()) {
-                    requests.add("/slow/" + i);
-                    slowIds.add(i);
-                } else {
-                    requests.add("/" + i);
-                }
-            }
-
-            try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
-                Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[]{}));
-                List<String> responseBodies = new ArrayList<>(Netty4HttpClient.returnHttpResponseBodies(responses));
-                // we can not be sure about the order of the responses, but the slow ones should come last
-                assertThat(responseBodies, hasSize(numberOfRequests));
-                for (int i = 0; i < numberOfRequests - slowIds.size(); i++) {
-                    assertThat(responseBodies.get(i), matches("/\\d+"));
-                }
-
-                final Set<Integer> ids = new HashSet<>();
-                for (int i = 0; i < slowIds.size(); i++) {
-                    final String response = responseBodies.get(numberOfRequests - slowIds.size() + i);
-                    assertThat(response, matches("/slow/\\d+"));
-                    assertTrue(ids.add(Integer.parseInt(response.split("/")[2])));
-                }
-
-                assertThat(slowIds, equalTo(ids));
-            }
-        }
-    }
-
     class CustomNettyHttpServerTransport extends Netty4HttpServerTransport {

         private final ExecutorService executorService = Executors.newCachedThreadPool();
@@ -196,7 +148,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {

     }

-    class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler<Object> {
+    class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> {

         private final ExecutorService executorService;

@@ -205,7 +157,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
         }

         @Override
-        protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
+        protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) throws Exception {
             executorService.submit(new PossiblySlowRunnable(ctx, msg));
         }

@@ -220,26 +172,18 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
     class PossiblySlowRunnable implements Runnable {

         private ChannelHandlerContext ctx;
-        private HttpPipelinedRequest pipelinedRequest;
+        private HttpPipelinedRequest<FullHttpRequest> pipelinedRequest;
         private FullHttpRequest fullHttpRequest;

-        PossiblySlowRunnable(ChannelHandlerContext ctx, Object msg) {
+        PossiblySlowRunnable(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) {
             this.ctx = ctx;
-            if (msg instanceof HttpPipelinedRequest) {
-                this.pipelinedRequest = (HttpPipelinedRequest) msg;
-            } else if (msg instanceof FullHttpRequest) {
-                this.fullHttpRequest = (FullHttpRequest) msg;
-            }
+            this.pipelinedRequest = msg;
+            this.fullHttpRequest = pipelinedRequest.getRequest();
         }

         @Override
         public void run() {
-            final String uri;
-            if (pipelinedRequest != null && pipelinedRequest.last() instanceof FullHttpRequest) {
-                uri = ((FullHttpRequest) pipelinedRequest.last()).uri();
-            } else {
-                uri = fullHttpRequest.uri();
-            }
+            final String uri = fullHttpRequest.uri();

             final ByteBuf buffer = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8);

@@ -258,13 +202,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
             }

             final ChannelPromise promise = ctx.newPromise();
-            final Object msg;
-            if (pipelinedRequest != null) {
-                msg = pipelinedRequest.createHttpResponse(httpResponse, promise);
-            } else {
-                msg = httpResponse;
-            }
-            ctx.writeAndFlush(msg, promise);
+            ctx.writeAndFlush(new Netty4HttpResponse(pipelinedRequest.getSequence(), httpResponse), promise);
         }

     }

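
For context on what these tests exercise: with HTTP/1.1 pipelining, a client writes several requests on one connection before reading any responses, and the server must answer in request order. A rough sketch of such a client using only java.net (hypothetical; the host, port, and paths are placeholders, and real code would need proper response parsing):

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.net.Socket;
    import java.nio.charset.StandardCharsets;

    // Rough sketch of a pipelined HTTP/1.1 client: all requests are written before
    // any response is read, so a correct server must preserve request order.
    public class PipelinedClient {
        public static void main(String[] args) throws Exception {
            try (Socket socket = new Socket("localhost", 9200)) {
                OutputStream out = socket.getOutputStream();
                for (String path : new String[]{"/a", "/b", "/c"}) {
                    String request = "GET " + path + " HTTP/1.1\r\nHost: localhost\r\n\r\n";
                    out.write(request.getBytes(StandardCharsets.UTF_8));
                }
                out.flush();
                // Responses now arrive concatenated, in the order /a, /b, /c.
                InputStream in = socket.getInputStream();
                byte[] buffer = new byte[8192];
                int read = in.read(buffer); // assumes at least some bytes arrived
                System.out.write(buffer, 0, Math.max(read, 0));
                System.out.flush();
            }
        }
    }
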
@@ -21,8 +21,6 @@ package org.elasticsearch.http.netty4;

 import io.netty.handler.codec.http.FullHttpResponse;
 import org.elasticsearch.ESNetty4IntegTestCase;
-import org.elasticsearch.common.network.NetworkModule;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.http.HttpServerTransport;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@@ -35,21 +33,13 @@ import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;

 @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
-public class Netty4PipeliningEnabledIT extends ESNetty4IntegTestCase {
+public class Netty4PipeliningIT extends ESNetty4IntegTestCase {

     @Override
     protected boolean addMockHttpTransport() {
         return false; // enable http
     }

-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            .put("http.pipelining", true)
-            .build();
-    }
-
     public void testThatNettyHttpServerSupportsPipelining() throws Exception {
         String[] requests = new String[]{"/", "/_nodes/stats", "/", "/_cluster/state", "/"};

@@ -25,20 +25,21 @@ import io.netty.handler.codec.ByteToMessageDecoder;
 import io.netty.handler.codec.http.DefaultFullHttpRequest;
 import io.netty.handler.codec.http.DefaultHttpHeaders;
 import io.netty.handler.codec.http.FullHttpRequest;
 import io.netty.handler.codec.http.FullHttpResponse;
 import io.netty.handler.codec.http.HttpContentCompressor;
 import io.netty.handler.codec.http.HttpContentDecompressor;
 import io.netty.handler.codec.http.HttpHeaders;
 import io.netty.handler.codec.http.HttpObjectAggregator;
 import io.netty.handler.codec.http.HttpRequestDecoder;
 import io.netty.handler.codec.http.HttpResponseEncoder;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.http.HttpHandlingSettings;
+import org.elasticsearch.http.HttpPipelinedRequest;
 import org.elasticsearch.nio.FlushOperation;
 import org.elasticsearch.nio.InboundChannelBuffer;
-import org.elasticsearch.nio.ReadWriteHandler;
 import org.elasticsearch.nio.NioSocketChannel;
+import org.elasticsearch.nio.ReadWriteHandler;
 import org.elasticsearch.nio.SocketChannelContext;
 import org.elasticsearch.nio.WriteOperation;
 import org.elasticsearch.rest.RestRequest;
@@ -77,6 +78,7 @@ public class HttpReadWriteHandler implements ReadWriteHandler {
         if (settings.isCompression()) {
             handlers.add(new HttpContentCompressor(settings.getCompressionLevel()));
         }
+        handlers.add(new NioHttpPipeliningHandler(transport.getLogger(), settings.getPipeliningMaxEvents()));

         adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0]));
         adaptor.addCloseListener((v, e) -> nioChannel.close());
@@ -95,9 +97,9 @@ public class HttpReadWriteHandler implements ReadWriteHandler {

     @Override
     public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer<Void, Throwable> listener) {
-        assert message instanceof FullHttpResponse : "This channel only supports messages that are of type: " + FullHttpResponse.class
-            + ". Found type: " + message.getClass() + ".";
-        return new HttpWriteOperation(context, (FullHttpResponse) message, listener);
+        assert message instanceof NioHttpResponse : "This channel only supports messages that are of type: "
+            + NioHttpResponse.class + ". Found type: " + message.getClass() + ".";
+        return new HttpWriteOperation(context, (NioHttpResponse) message, listener);
     }

     @Override
@@ -125,76 +127,85 @@ public class HttpReadWriteHandler implements ReadWriteHandler {
         }
     }

+    @SuppressWarnings("unchecked")
     private void handleRequest(Object msg) {
-        final FullHttpRequest request = (FullHttpRequest) msg;
+        final HttpPipelinedRequest<FullHttpRequest> pipelinedRequest = (HttpPipelinedRequest<FullHttpRequest>) msg;
+        FullHttpRequest request = pipelinedRequest.getRequest();

-        final FullHttpRequest copiedRequest =
-            new DefaultFullHttpRequest(
-                request.protocolVersion(),
-                request.method(),
-                request.uri(),
-                Unpooled.copiedBuffer(request.content()),
-                request.headers(),
-                request.trailingHeaders());
-
-        Exception badRequestCause = null;
-
-        /*
-         * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there
-         * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we
-         * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header,
-         * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the
-         * underlying exception that caused us to treat the request as bad.
-         */
-        final NioHttpRequest httpRequest;
-        {
-            NioHttpRequest innerHttpRequest;
-            try {
-                innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest);
-            } catch (final RestRequest.ContentTypeHeaderException e) {
-                badRequestCause = e;
-                innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause);
-            } catch (final RestRequest.BadParameterException e) {
-                badRequestCause = e;
-                innerHttpRequest = requestWithoutParameters(copiedRequest);
-            }
-            httpRequest = innerHttpRequest;
-        }
-
-        /*
-         * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid
-         * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an
-         * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these
-         * parameter values.
-         */
-        final NioHttpChannel channel;
-        {
-            NioHttpChannel innerChannel;
-            try {
-                innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), httpRequest, settings, threadContext);
-            } catch (final IllegalArgumentException e) {
-                if (badRequestCause == null) {
-                    badRequestCause = e;
-                } else {
-                    badRequestCause.addSuppressed(e);
-                }
-                final NioHttpRequest innerRequest =
-                    new NioHttpRequest(
-                        xContentRegistry,
-                        Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters
-                        copiedRequest.uri(),
-                        copiedRequest);
-                innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), innerRequest, settings, threadContext);
-            }
-            channel = innerChannel;
-        }
-
-        if (request.decoderResult().isFailure()) {
-            transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause());
-        } else if (badRequestCause != null) {
-            transport.dispatchBadRequest(httpRequest, channel, badRequestCause);
-        } else {
-            transport.dispatchRequest(httpRequest, channel);
+        try {
+            final FullHttpRequest copiedRequest =
+                new DefaultFullHttpRequest(
+                    request.protocolVersion(),
+                    request.method(),
+                    request.uri(),
+                    Unpooled.copiedBuffer(request.content()),
+                    request.headers(),
+                    request.trailingHeaders());
+
+            Exception badRequestCause = null;
+
+            /*
+             * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there
+             * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we
+             * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header,
+             * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the
+             * underlying exception that caused us to treat the request as bad.
+             */
+            final NioHttpRequest httpRequest;
+            {
+                NioHttpRequest innerHttpRequest;
+                try {
+                    innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest);
+                } catch (final RestRequest.ContentTypeHeaderException e) {
+                    badRequestCause = e;
+                    innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause);
+                } catch (final RestRequest.BadParameterException e) {
+                    badRequestCause = e;
+                    innerHttpRequest = requestWithoutParameters(copiedRequest);
+                }
+                httpRequest = innerHttpRequest;
+            }
+
+            /*
+             * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid
+             * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an
+             * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of
+             * these parameter values.
+             */
+            final NioHttpChannel channel;
+            {
+                NioHttpChannel innerChannel;
+                int sequence = pipelinedRequest.getSequence();
+                BigArrays bigArrays = transport.getBigArrays();
+                try {
+                    innerChannel = new NioHttpChannel(nioChannel, bigArrays, httpRequest, sequence, settings, threadContext);
+                } catch (final IllegalArgumentException e) {
+                    if (badRequestCause == null) {
+                        badRequestCause = e;
+                    } else {
+                        badRequestCause.addSuppressed(e);
+                    }
+                    final NioHttpRequest innerRequest =
+                        new NioHttpRequest(
+                            xContentRegistry,
+                            Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters
+                            copiedRequest.uri(),
+                            copiedRequest);
+                    innerChannel = new NioHttpChannel(nioChannel, bigArrays, innerRequest, sequence, settings, threadContext);
+                }
+                channel = innerChannel;
+            }
+
+            if (request.decoderResult().isFailure()) {
+                transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause());
+            } else if (badRequestCause != null) {
+                transport.dispatchBadRequest(httpRequest, channel, badRequestCause);
+            } else {
+                transport.dispatchRequest(httpRequest, channel);
+            }
+        } finally {
+            // As we have copied the buffer, we can release the request
+            request.release();
         }
     }

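
The try/finally wrapper introduced around handleRequest above encodes a standard Netty ownership rule: once the request content has been copied, the original (possibly pooled) buffer must be released exactly once on every path, including exceptional ones. The same pattern condensed (the dispatch method is a hypothetical stand-in):

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;
    import io.netty.handler.codec.http.FullHttpRequest;

    // Condensed sketch of the copy-then-release pattern: copy the content so it
    // outlives the pooled buffer, then release the original in finally so no
    // early return or exception can leak it.
    final class RequestHandling {

        static void handle(FullHttpRequest request) {
            try {
                byte[] copy = new byte[request.content().readableBytes()];
                request.content().getBytes(request.content().readerIndex(), copy);
                dispatch(request.uri(), Unpooled.wrappedBuffer(copy));
            } finally {
                request.release(); // balances the reference the decoder handed us
            }
        }

        private static void dispatch(String uri, ByteBuf content) {
            // stand-in for transport.dispatchRequest(...)
        }
    }
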
@@ -19,7 +19,6 @@

 package org.elasticsearch.http.nio;

-import io.netty.handler.codec.http.FullHttpResponse;
 import org.elasticsearch.nio.SocketChannelContext;
 import org.elasticsearch.nio.WriteOperation;

@@ -28,10 +27,10 @@ import java.util.function.BiConsumer;
 public class HttpWriteOperation implements WriteOperation {

     private final SocketChannelContext channelContext;
-    private final FullHttpResponse response;
+    private final NioHttpResponse response;
     private final BiConsumer<Void, Throwable> listener;

-    HttpWriteOperation(SocketChannelContext channelContext, FullHttpResponse response, BiConsumer<Void, Throwable> listener) {
+    HttpWriteOperation(SocketChannelContext channelContext, NioHttpResponse response, BiConsumer<Void, Throwable> listener) {
         this.channelContext = channelContext;
         this.response = response;
         this.listener = listener;
@@ -48,7 +47,7 @@ public class HttpWriteOperation implements WriteOperation {
     }

     @Override
-    public FullHttpResponse getObject() {
+    public NioHttpResponse getObject() {
         return response;
     }
 }

@@ -53,12 +53,7 @@ public class NettyAdaptor implements AutoCloseable {
             try {
                 ByteBuf message = (ByteBuf) msg;
                 promise.addListener((f) -> message.release());
-                NettyListener listener;
-                if (promise instanceof NettyListener) {
-                    listener = (NettyListener) promise;
-                } else {
-                    listener = new NettyListener(promise);
-                }
+                NettyListener listener = NettyListener.fromChannelPromise(promise);
                 flushOperations.add(new FlushOperation(message.nioBuffers(), listener));
             } catch (Exception e) {
                 promise.setFailure(e);
@@ -107,18 +102,7 @@ public class NettyAdaptor implements AutoCloseable {
     }

     public void write(WriteOperation writeOperation) {
-        ChannelPromise channelPromise = nettyChannel.newPromise();
-        channelPromise.addListener(f -> {
-            BiConsumer<Void, Throwable> consumer = writeOperation.getListener();
-            if (f.cause() == null) {
-                consumer.accept(null, null);
-            } else {
-                ExceptionsHelper.dieOnError(f.cause());
-                consumer.accept(null, f.cause());
-            }
-        });
-
-        nettyChannel.writeAndFlush(writeOperation.getObject(), new NettyListener(channelPromise));
+        nettyChannel.writeAndFlush(writeOperation.getObject(), NettyListener.fromBiConsumer(writeOperation.getListener(), nettyChannel));
     }

     public FlushOperation pollOutboundOperation() {

@@ -23,7 +23,7 @@ import io.netty.channel.Channel;
 import io.netty.channel.ChannelPromise;
 import io.netty.util.concurrent.Future;
 import io.netty.util.concurrent.GenericFutureListener;
-import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.util.concurrent.FutureUtils;

 import java.util.concurrent.ExecutionException;
@@ -40,7 +40,7 @@ public class NettyListener implements BiConsumer<Void, Throwable>, ChannelPromise {

     private final ChannelPromise promise;

-    NettyListener(ChannelPromise promise) {
+    private NettyListener(ChannelPromise promise) {
         this.promise = promise;
     }

@@ -211,4 +211,30 @@ public class NettyListener implements BiConsumer<Void, Throwable>, ChannelPromise {
     public ChannelPromise unvoid() {
         return promise.unvoid();
     }
+
+    public static NettyListener fromBiConsumer(BiConsumer<Void, Throwable> biConsumer, Channel channel) {
+        if (biConsumer instanceof NettyListener) {
+            return (NettyListener) biConsumer;
+        } else {
+            ChannelPromise channelPromise = channel.newPromise();
+            channelPromise.addListener(f -> {
+                if (f.cause() == null) {
+                    biConsumer.accept(null, null);
+                } else {
+                    ExceptionsHelper.dieOnError(f.cause());
+                    biConsumer.accept(null, f.cause());
+                }
+            });
+
+            return new NettyListener(channelPromise);
+        }
+    }
+
+    public static NettyListener fromChannelPromise(ChannelPromise channelPromise) {
+        if (channelPromise instanceof NettyListener) {
+            return (NettyListener) channelPromise;
+        } else {
+            return new NettyListener(channelPromise);
+        }
+    }
 }

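
The point of the two factory methods added above is to avoid stacking adapters: whichever shape the caller already holds (a Netty ChannelPromise or an Elasticsearch-style BiConsumer), an instanceof check reuses the existing listener instead of wrapping it again. The same idiom in isolation (all types here are hypothetical):

    import java.util.function.BiConsumer;

    // Isolated sketch of adapter normalization: if the callback is already our
    // adapter type, unwrap and reuse it rather than adding another layer.
    interface Completion {
        void complete(Throwable failure); // null means success
    }

    final class CompletionAdapter implements Completion, BiConsumer<Void, Throwable> {

        private final BiConsumer<Void, Throwable> delegate;

        private CompletionAdapter(BiConsumer<Void, Throwable> delegate) {
            this.delegate = delegate;
        }

        static CompletionAdapter fromBiConsumer(BiConsumer<Void, Throwable> consumer) {
            return consumer instanceof CompletionAdapter
                ? (CompletionAdapter) consumer   // already adapted: no extra layer
                : new CompletionAdapter(consumer);
        }

        @Override
        public void complete(Throwable failure) {
            delegate.accept(null, failure);
        }

        @Override
        public void accept(Void aVoid, Throwable failure) {
            delegate.accept(aVoid, failure);
        }
    }
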
@@ -52,20 +52,23 @@ import java.util.EnumMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.function.BiConsumer;

 public class NioHttpChannel extends AbstractRestChannel {

     private final BigArrays bigArrays;
+    private final int sequence;
     private final ThreadContext threadContext;
     private final FullHttpRequest nettyRequest;
     private final NioSocketChannel nioChannel;
     private final boolean resetCookies;

-    NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request,
+    NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, int sequence,
                    HttpHandlingSettings settings, ThreadContext threadContext) {
         super(request, settings.getDetailedErrorsEnabled());
         this.nioChannel = nioChannel;
         this.bigArrays = bigArrays;
+        this.sequence = sequence;
         this.threadContext = threadContext;
         this.nettyRequest = request.getRequest();
         this.resetCookies = settings.isResetCookies();
@@ -117,9 +120,8 @@ public class NioHttpChannel extends AbstractRestChannel {
                 toClose.add(nioChannel::close);
             }

-            nioChannel.getContext().sendMessage(resp, (aVoid, throwable) -> {
-                Releasables.close(toClose);
-            });
+            BiConsumer<Void, Throwable> listener = (aVoid, throwable) -> Releasables.close(toClose);
+            nioChannel.getContext().sendMessage(new NioHttpResponse(sequence, resp), listener);
             success = true;
         } finally {
             if (success == false) {

@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.nio;
+
+import io.netty.channel.ChannelDuplexHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import io.netty.handler.codec.http.LastHttpContent;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.http.HttpPipelinedRequest;
+import org.elasticsearch.http.HttpPipeliningAggregator;
+import org.elasticsearch.http.nio.NettyListener;
+import org.elasticsearch.http.nio.NioHttpResponse;
+
+import java.nio.channels.ClosedChannelException;
+import java.util.List;
+
+/**
+ * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests.
+ */
+public class NioHttpPipeliningHandler extends ChannelDuplexHandler {
+
+    private final Logger logger;
+    private final HttpPipeliningAggregator<NioHttpResponse, NettyListener> aggregator;
+
+    /**
+     * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation.
+     *
+     * @param logger        for logging unexpected errors
+     * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is
+     *                      required as events cannot queue up indefinitely
+     */
+    public NioHttpPipeliningHandler(Logger logger, final int maxEventsHeld) {
+        this.logger = logger;
+        this.aggregator = new HttpPipeliningAggregator<>(maxEventsHeld);
+    }
+
+    @Override
+    public void channelRead(final ChannelHandlerContext ctx, final Object msg) {
+        if (msg instanceof LastHttpContent) {
+            HttpPipelinedRequest<LastHttpContent> pipelinedRequest = aggregator.read(((LastHttpContent) msg));
+            ctx.fireChannelRead(pipelinedRequest);
+        } else {
+            ctx.fireChannelRead(msg);
+        }
+    }
+
+    @Override
+    public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) {
+        assert msg instanceof NioHttpResponse : "Message must be type: " + NioHttpResponse.class;
+        NioHttpResponse response = (NioHttpResponse) msg;
+        boolean success = false;
+        try {
+            NettyListener listener = NettyListener.fromChannelPromise(promise);
+            List<Tuple<NioHttpResponse, NettyListener>> readyResponses = aggregator.write(response, listener);
+            success = true;
+            for (Tuple<NioHttpResponse, NettyListener> responseToWrite : readyResponses) {
+                ctx.write(responseToWrite.v1().getResponse(), responseToWrite.v2());
+            }
+        } catch (IllegalStateException e) {
+            ctx.channel().close();
+        } finally {
+            if (success == false) {
+                promise.setFailure(new ClosedChannelException());
+            }
+        }
+    }
+
+    @Override
+    public void close(ChannelHandlerContext ctx, ChannelPromise promise) {
+        List<Tuple<NioHttpResponse, NettyListener>> inflightResponses = aggregator.removeAllInflightResponses();
+
+        if (inflightResponses.isEmpty() == false) {
+            ClosedChannelException closedChannelException = new ClosedChannelException();
+            for (Tuple<NioHttpResponse, NettyListener> inflightResponse : inflightResponses) {
+                try {
+                    inflightResponse.v2().setFailure(closedChannelException);
+                } catch (RuntimeException e) {
+                    logger.error("unexpected error while releasing pipelined http responses", e);
+                }
+            }
+        }
+        ctx.close(promise);
+    }
+}
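
Note the success flag in write(...) above: if the aggregator rejects the response before it is handed to ctx.write, the finally block fails the promise, so no caller is left waiting on a write that will never complete. The same shape in miniature (hypothetical types, using CompletableFuture in place of a ChannelPromise):

    import java.nio.channels.ClosedChannelException;
    import java.util.concurrent.CompletableFuture;

    // Miniature of the success-flag idiom: on any failure before the callback is
    // handed off, fail the future in finally so the caller is always completed.
    final class HandOff {

        static void handOff(CompletableFuture<Void> promise, Runnable riskyEnqueue) {
            boolean success = false;
            try {
                riskyEnqueue.run(); // may throw before anyone owns the promise
                success = true;
            } finally {
                if (success == false) {
                    promise.completeExceptionally(new ClosedChannelException());
                }
            }
        }
    }
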
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.nio;
+
+import io.netty.handler.codec.http.FullHttpResponse;
+import org.elasticsearch.http.HttpPipelinedMessage;
+
+public class NioHttpResponse extends HttpPipelinedMessage {
+
+    private final FullHttpResponse response;
+
+    public NioHttpResponse(int sequence, FullHttpResponse response) {
+        super(sequence);
+        this.response = response;
+    }
+
+    public FullHttpResponse getResponse() {
+        return response;
+    }
+}

@@ -20,6 +20,7 @@
 package org.elasticsearch.http.nio;

+import io.netty.handler.timeout.ReadTimeoutException;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
@@ -84,6 +85,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;

 public class NioHttpServerTransport extends AbstractHttpServerTransport {

@@ -124,6 +126,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
         ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
         ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
         ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
+        int pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
         this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()),
             Math.toIntExact(maxChunkSize.getBytes()),
             Math.toIntExact(maxHeaderSize.getBytes()),
@@ -131,7 +134,8 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
             SETTING_HTTP_RESET_COOKIES.get(settings),
             SETTING_HTTP_COMPRESSION.get(settings),
             SETTING_HTTP_COMPRESSION_LEVEL.get(settings),
-            SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings));
+            SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings),
+            pipeliningMaxEvents);

         this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings);
         this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings);
@@ -140,14 +144,19 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
         this.tcpReceiveBufferSize = Math.toIntExact(SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings).getBytes());


-        logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]",
-            maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength);
+        logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]," +
+            " pipelining_max_events[{}]",
+            maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength, pipeliningMaxEvents);
     }

     BigArrays getBigArrays() {
         return bigArrays;
     }

+    public Logger getLogger() {
+        return logger;
+    }
+
     @Override
     protected void doStart() {
         boolean success = false;

@@ -20,6 +20,7 @@ package org.elasticsearch;

 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.nio.NioHttpServerTransport;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.transport.nio.NioTransport;
@@ -43,11 +44,13 @@ public abstract class NioIntegTestCase extends ESIntegTestCase {
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal));
-        // randomize netty settings
+        // randomize nio settings
         if (randomBoolean()) {
             builder.put(NioTransport.NIO_WORKER_COUNT.getKey(), random().nextInt(3) + 1);
+            builder.put(NioHttpServerTransport.NIO_HTTP_WORKER_COUNT.getKey(), random().nextInt(3) + 1);
         }
         builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME);
+        builder.put(NetworkModule.HTTP_TYPE_KEY, NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME);
         return builder.build();
     }

@ -61,11 +61,11 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUN
|
|||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.verifyZeroInteractions;
|
||||
|
||||
public class HttpReadWriteHandlerTests extends ESTestCase {
|
||||
|
||||
|
@@ -91,7 +91,8 @@ public class HttpReadWriteHandlerTests extends ESTestCase {
            SETTING_HTTP_RESET_COOKIES.getDefault(settings),
            SETTING_HTTP_COMPRESSION.getDefault(settings),
            SETTING_HTTP_COMPRESSION_LEVEL.getDefault(settings),
-           SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings));
+           SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings),
+           SETTING_PIPELINING_MAX_EVENTS.getDefault(settings));
        ThreadContext threadContext = new ThreadContext(settings);
        nioSocketChannel = mock(NioSocketChannel.class);
        handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, threadContext);
@@ -148,7 +149,8 @@ public class HttpReadWriteHandlerTests extends ESTestCase {

        handler.consumeReads(toChannelBuffer(buf));

-       verifyZeroInteractions(transport);
+       verify(transport, times(0)).dispatchBadRequest(any(), any(), any());
+       verify(transport, times(0)).dispatchRequest(any(), any());

        List<FlushOperation> flushOperations = handler.pollFlushOperations();
        assertFalse(flushOperations.isEmpty());
@@ -169,9 +171,10 @@ public class HttpReadWriteHandlerTests extends ESTestCase {
        prepareHandlerForResponse(handler);

        FullHttpResponse fullHttpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
+       NioHttpResponse pipelinedResponse = new NioHttpResponse(0, fullHttpResponse);

        SocketChannelContext context = mock(SocketChannelContext.class);
-       HttpWriteOperation writeOperation = new HttpWriteOperation(context, fullHttpResponse, mock(BiConsumer.class));
+       HttpWriteOperation writeOperation = new HttpWriteOperation(context, pipelinedResponse, mock(BiConsumer.class));
        List<FlushOperation> flushOperations = handler.writeToBytes(writeOperation);

        HttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperations.get(0).getBuffersToWrite()));
@@ -0,0 +1,304 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.nio;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.DefaultHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.QueryStringDecoder;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.http.HttpPipelinedRequest;
import org.elasticsearch.test.ESTestCase;
import org.junit.After;

import java.nio.channels.ClosedChannelException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH;
import static io.netty.handler.codec.http.HttpResponseStatus.OK;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import static org.hamcrest.core.Is.is;

public class NioHttpPipeliningHandlerTests extends ESTestCase {

    private final ExecutorService handlerService = Executors.newFixedThreadPool(randomIntBetween(4, 8));
    private final ExecutorService eventLoopService = Executors.newFixedThreadPool(1);
    private final Map<String, CountDownLatch> waitingRequests = new ConcurrentHashMap<>();
    private final Map<String, CountDownLatch> finishingRequests = new ConcurrentHashMap<>();

    @After
    public void cleanup() throws Exception {
        waitingRequests.keySet().forEach(this::finishRequest);
        shutdownExecutorService();
    }

    private CountDownLatch finishRequest(String url) {
        waitingRequests.get(url).countDown();
        return finishingRequests.get(url);
    }

    private void shutdownExecutorService() throws InterruptedException {
        if (!handlerService.isShutdown()) {
            handlerService.shutdown();
            handlerService.awaitTermination(10, TimeUnit.SECONDS);
        }
        if (!eventLoopService.isShutdown()) {
            eventLoopService.shutdown();
            eventLoopService.awaitTermination(10, TimeUnit.SECONDS);
        }
    }

    public void testThatPipeliningWorksWithFastSerializedRequests() throws InterruptedException {
        final int numberOfRequests = randomIntBetween(2, 128);
        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests),
            new WorkEmulatorHandler());

        for (int i = 0; i < numberOfRequests; i++) {
            embeddedChannel.writeInbound(createHttpRequest("/" + String.valueOf(i)));
        }

        final List<CountDownLatch> latches = new ArrayList<>();
        for (final String url : waitingRequests.keySet()) {
            latches.add(finishRequest(url));
        }

        for (final CountDownLatch latch : latches) {
            latch.await();
        }

        embeddedChannel.flush();

        for (int i = 0; i < numberOfRequests; i++) {
            assertReadHttpMessageHasContent(embeddedChannel, String.valueOf(i));
        }

        assertTrue(embeddedChannel.isOpen());
    }

    public void testThatPipeliningWorksWhenSlowRequestsInDifferentOrder() throws InterruptedException {
        final int numberOfRequests = randomIntBetween(2, 128);
        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests),
            new WorkEmulatorHandler());

        for (int i = 0; i < numberOfRequests; i++) {
            embeddedChannel.writeInbound(createHttpRequest("/" + String.valueOf(i)));
        }

        // random order execution
        final List<String> urls = new ArrayList<>(waitingRequests.keySet());
        Randomness.shuffle(urls);
        final List<CountDownLatch> latches = new ArrayList<>();
        for (final String url : urls) {
            latches.add(finishRequest(url));
        }

        for (final CountDownLatch latch : latches) {
            latch.await();
        }

        embeddedChannel.flush();

        for (int i = 0; i < numberOfRequests; i++) {
            assertReadHttpMessageHasContent(embeddedChannel, String.valueOf(i));
        }

        assertTrue(embeddedChannel.isOpen());
    }

    public void testThatPipeliningWorksWithChunkedRequests() throws InterruptedException {
        final int numberOfRequests = randomIntBetween(2, 128);
        final EmbeddedChannel embeddedChannel =
            new EmbeddedChannel(
                new AggregateUrisAndHeadersHandler(),
                new NioHttpPipeliningHandler(logger, numberOfRequests),
                new WorkEmulatorHandler());

        for (int i = 0; i < numberOfRequests; i++) {
            // a plain (non-full) request so the body arrives as a separate chunk below
            final DefaultHttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/" + i);
            embeddedChannel.writeInbound(request);
            embeddedChannel.writeInbound(LastHttpContent.EMPTY_LAST_CONTENT);
        }

        final List<CountDownLatch> latches = new ArrayList<>();
        for (int i = numberOfRequests - 1; i >= 0; i--) {
            latches.add(finishRequest(Integer.toString(i)));
        }

        for (final CountDownLatch latch : latches) {
            latch.await();
        }

        embeddedChannel.flush();

        for (int i = 0; i < numberOfRequests; i++) {
            assertReadHttpMessageHasContent(embeddedChannel, Integer.toString(i));
        }

        assertTrue(embeddedChannel.isOpen());
    }

    public void testThatPipeliningClosesConnectionWithTooManyEvents() throws InterruptedException {
        final int numberOfRequests = randomIntBetween(2, 128);
        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests),
            new WorkEmulatorHandler());

        for (int i = 0; i < 1 + numberOfRequests + 1; i++) {
            embeddedChannel.writeInbound(createHttpRequest("/" + Integer.toString(i)));
        }

        final List<CountDownLatch> latches = new ArrayList<>();
        final List<Integer> requests = IntStream.range(1, numberOfRequests + 1).boxed().collect(Collectors.toList());
        Randomness.shuffle(requests);

        for (final Integer request : requests) {
            latches.add(finishRequest(request.toString()));
        }

        for (final CountDownLatch latch : latches) {
            latch.await();
        }

        finishRequest(Integer.toString(numberOfRequests + 1)).await();

        embeddedChannel.flush();

        assertFalse(embeddedChannel.isOpen());
    }

    public void testPipeliningRequestsAreReleased() throws InterruptedException {
        final int numberOfRequests = 10;
        final EmbeddedChannel embeddedChannel =
            new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests + 1));

        for (int i = 0; i < numberOfRequests; i++) {
            embeddedChannel.writeInbound(createHttpRequest("/" + i));
        }

        HttpPipelinedRequest<FullHttpRequest> inbound;
        ArrayList<HttpPipelinedRequest<FullHttpRequest>> requests = new ArrayList<>();
        while ((inbound = embeddedChannel.readInbound()) != null) {
            requests.add(inbound);
        }

        ArrayList<ChannelPromise> promises = new ArrayList<>();
        for (int i = 1; i < requests.size(); ++i) {
            final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK);
            ChannelPromise promise = embeddedChannel.newPromise();
            promises.add(promise);
            int sequence = requests.get(i).getSequence();
            NioHttpResponse resp = new NioHttpResponse(sequence, httpResponse);
            embeddedChannel.writeAndFlush(resp, promise);
        }

        for (ChannelPromise promise : promises) {
            assertFalse(promise.isDone());
        }
        embeddedChannel.close().syncUninterruptibly();
        for (ChannelPromise promise : promises) {
            assertTrue(promise.isDone());
            assertTrue(promise.cause() instanceof ClosedChannelException);
        }
    }

    private void assertReadHttpMessageHasContent(EmbeddedChannel embeddedChannel, String expectedContent) {
        FullHttpResponse response = (FullHttpResponse) embeddedChannel.outboundMessages().poll();
        assertNotNull("Expected response to exist, maybe you did not wait long enough?", response);
        assertNotNull("Expected response to have content " + expectedContent, response.content());
        String data = new String(ByteBufUtil.getBytes(response.content()), StandardCharsets.UTF_8);
        assertThat(data, is(expectedContent));
    }

    private FullHttpRequest createHttpRequest(String uri) {
        return new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uri);
    }

    private static class AggregateUrisAndHeadersHandler extends SimpleChannelInboundHandler<HttpRequest> {

        static final Queue<String> QUEUE_URI = new LinkedTransferQueue<>();

        @Override
        protected void channelRead0(ChannelHandlerContext ctx, HttpRequest request) throws Exception {
            QUEUE_URI.add(request.uri());
        }

    }

    private class WorkEmulatorHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<LastHttpContent>> {

        @Override
        protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest<LastHttpContent> pipelinedRequest) {
            LastHttpContent request = pipelinedRequest.getRequest();
            final QueryStringDecoder decoder;
            if (request instanceof FullHttpRequest) {
                decoder = new QueryStringDecoder(((FullHttpRequest) request).uri());
            } else {
                decoder = new QueryStringDecoder(AggregateUrisAndHeadersHandler.QUEUE_URI.poll());
            }

            final String uri = decoder.path().replace("/", "");
            final ByteBuf content = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8);
            final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK, content);
            httpResponse.headers().add(CONTENT_LENGTH, content.readableBytes());

            final CountDownLatch waitingLatch = new CountDownLatch(1);
            waitingRequests.put(uri, waitingLatch);
            final CountDownLatch finishingLatch = new CountDownLatch(1);
            finishingRequests.put(uri, finishingLatch);

            handlerService.submit(() -> {
                try {
                    waitingLatch.await(1000, TimeUnit.SECONDS);
                    final ChannelPromise promise = ctx.newPromise();
                    eventLoopService.submit(() -> {
                        ctx.write(new NioHttpResponse(pipelinedRequest.getSequence(), httpResponse), promise);
                        finishingLatch.countDown();
                    });
                } catch (InterruptedException e) {
                    fail(e.toString());
                }
            });
        }
    }
}
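The invariant these tests pin down: every inbound request is wrapped in an HttpPipelinedRequest carrying a sequence number, and a response must hand that sequence back so the pipelining handler can buffer out-of-order writes. A sketch grounded in testPipeliningRequestsAreReleased above:

    // Echo the handler-assigned sequence on the outbound response.
    HttpPipelinedRequest<FullHttpRequest> pipelined = embeddedChannel.readInbound();
    FullHttpResponse payload = new DefaultFullHttpResponse(HTTP_1_1, OK);
    embeddedChannel.writeAndFlush(new NioHttpResponse(pipelined.getSequence(), payload));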
@@ -16,65 +16,53 @@
 * specific language governing permissions and limitations
 * under the License.
 */
-package org.elasticsearch.http.netty4;
+package org.elasticsearch.http.nio;

import io.netty.handler.codec.http.FullHttpResponse;
-import org.elasticsearch.ESNetty4IntegTestCase;
-import org.elasticsearch.common.network.NetworkModule;
-import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.NioIntegTestCase;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;

-import java.util.ArrayList;
import java.util.Collection;
-import java.util.List;
import java.util.Locale;

-import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;

@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
-public class Netty4PipeliningDisabledIT extends ESNetty4IntegTestCase {
+public class NioPipeliningIT extends NioIntegTestCase {

    @Override
    protected boolean addMockHttpTransport() {
        return false; // enable http
    }

-   @Override
-   protected Settings nodeSettings(int nodeOrdinal) {
-       return Settings.builder()
-           .put(super.nodeSettings(nodeOrdinal))
-           .put("http.pipelining", false)
-           .build();
-   }
-
-   public void testThatNettyHttpServerDoesNotSupportPipelining() throws Exception {
-       ensureGreen();
-       String[] requests = new String[] {"/", "/_nodes/stats", "/", "/_cluster/state", "/", "/_nodes", "/"};
+   public void testThatNioHttpServerSupportsPipelining() throws Exception {
+       String[] requests = new String[]{"/", "/_nodes/stats", "/", "/_cluster/state", "/"};

        HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
        TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses();
-       TransportAddress transportAddress = (TransportAddress) randomFrom(boundAddresses);
+       TransportAddress transportAddress = randomFrom(boundAddresses);

        try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
            Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests);
-           assertThat(responses, hasSize(requests.length));
+           assertThat(responses, hasSize(5));

-           List<String> opaqueIds = new ArrayList<>(Netty4HttpClient.returnOpaqueIds(responses));
-
-           assertResponsesOutOfOrder(opaqueIds);
+           Collection<String> opaqueIds = Netty4HttpClient.returnOpaqueIds(responses);
+           assertOpaqueIdsInOrder(opaqueIds);
        }
    }

-   /**
-    * checks if all responses are there, but also tests that they are out of order because pipelining is disabled
-    */
-   private void assertResponsesOutOfOrder(List<String> opaqueIds) {
-       String message = String.format(Locale.ROOT, "Expected returned http message ids to be in any order of: %s", opaqueIds);
-       assertThat(message, opaqueIds, containsInAnyOrder("0", "1", "2", "3", "4", "5", "6"));
+   private void assertOpaqueIdsInOrder(Collection<String> opaqueIds) {
+       // check if opaque ids are monotonically increasing
+       int i = 0;
+       String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds);
+       for (String opaqueId : opaqueIds) {
+           assertThat(msg, opaqueId, is(String.valueOf(i++)));
+       }
    }

}
@@ -29,9 +29,9 @@ plugins {
dependencies {
  compile "junit:junit:${versions.junit}"
  compile "org.hamcrest:hamcrest-core:${versions.hamcrest}"
+ compile "org.hamcrest:hamcrest-library:${versions.hamcrest}"

- // needs to be on the classpath for JarHell
- testRuntime project(':libs:elasticsearch-core')
+ compile project(':libs:elasticsearch-core')

  // pulls in the jar built by this project and its dependencies
  packagingTest project(path: project.path, configuration: 'runtime')

@@ -19,13 +19,20 @@

package org.elasticsearch.packaging;

-import org.junit.Test;
+import org.elasticsearch.packaging.test.OssTarTests;
+import org.elasticsearch.packaging.test.OssZipTests;
+import org.elasticsearch.packaging.test.DefaultTarTests;
+import org.elasticsearch.packaging.test.DefaultZipTests;

-/**
- * This class doesn't have any tests yet
- */
-public class PackagingTests {
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+import org.junit.runners.Suite.SuiteClasses;

-   @Test
-   public void testDummy() {}
-}
+@RunWith(Suite.class)
+@SuiteClasses({
+   DefaultTarTests.class,
+   DefaultZipTests.class,
+   OssTarTests.class,
+   OssZipTests.class
+})
+public class PackagingTests {}
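PackagingTests is now a plain JUnit 4 suite, so anything that can run a JUnit class can run the whole matrix. A hedged sketch (the driver class below is hypothetical, not part of the diff):

    import org.junit.runner.JUnitCore;
    import org.junit.runner.Result;

    public class RunPackagingSuite {
        public static void main(String[] args) {
            // Runs DefaultTarTests, DefaultZipTests, OssTarTests, OssZipTests in declaration order.
            Result result = JUnitCore.runClasses(org.elasticsearch.packaging.PackagingTests.class);
            System.out.println(result.getRunCount() + " run, " + result.getFailureCount() + " failed");
        }
    }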
@@ -0,0 +1,40 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging;

import org.junit.runner.JUnitCore;

import java.nio.file.Files;
import java.nio.file.Paths;

/**
 * Ensures that the current JVM is running on a virtual machine before delegating to {@link JUnitCore}. We just check for the existence
 * of a special file that we create during VM provisioning.
 */
public class VMTestRunner {
    public static void main(String[] args) {
        if (Files.exists(Paths.get("/is_vagrant_vm"))) {
            JUnitCore.main(args);
        } else {
            throw new RuntimeException("This filesystem does not have an expected marker file indicating it's a virtual machine. These " +
                "tests should only run in a virtual machine because they're destructive.");
        }
    }
}
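Presumably the Vagrant-side build hands this runner the suite's class name as a plain JUnitCore argument; the exact wiring is not shown in this diff, so the argument below is illustrative:

    // Fails fast unless the /is_vagrant_vm marker file exists on the VM.
    VMTestRunner.main(new String[] { "org.elasticsearch.packaging.PackagingTests" });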
@@ -0,0 +1,65 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.test;

import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;

import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Installation;

import static org.elasticsearch.packaging.util.Cleanup.cleanEverything;
import static org.elasticsearch.packaging.util.Archives.installArchive;
import static org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assume.assumeThat;

/**
 * Tests that apply to the archive distributions (tar, zip). To add a case for a distribution, subclass and
 * override {@link ArchiveTestCase#distribution()}. These tests should be the same across all archive distributions
 */
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public abstract class ArchiveTestCase {

    private static Installation installation;

    /** The {@link Distribution} that should be tested in this case */
    protected abstract Distribution distribution();

    @BeforeClass
    public static void cleanup() {
        installation = null;
        cleanEverything();
    }

    @Before
    public void onlyCompatibleDistributions() {
        assumeThat(distribution().packaging.compatible, is(true));
    }

    @Test
    public void test10Install() {
        installation = installArchive(distribution());
        verifyArchiveInstallation(installation, distribution());
    }
}
@@ -0,0 +1,30 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.test;

import org.elasticsearch.packaging.util.Distribution;

public class DefaultTarTests extends ArchiveTestCase {

    @Override
    protected Distribution distribution() {
        return Distribution.DEFAULT_TAR;
    }
}
@@ -0,0 +1,30 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.test;

import org.elasticsearch.packaging.util.Distribution;

public class DefaultZipTests extends ArchiveTestCase {

    @Override
    protected Distribution distribution() {
        return Distribution.DEFAULT_ZIP;
    }
}
@@ -0,0 +1,30 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.test;

import org.elasticsearch.packaging.util.Distribution;

public class OssTarTests extends ArchiveTestCase {

    @Override
    protected Distribution distribution() {
        return Distribution.OSS_TAR;
    }
}
@@ -0,0 +1,30 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.test;

import org.elasticsearch.packaging.util.Distribution;

public class OssZipTests extends ArchiveTestCase {

    @Override
    protected Distribution distribution() {
        return Distribution.OSS_ZIP;
    }
}
@@ -0,0 +1,239 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.util;

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Stream;

import static org.elasticsearch.packaging.util.FileMatcher.Fileness.Directory;
import static org.elasticsearch.packaging.util.FileMatcher.Fileness.File;
import static org.elasticsearch.packaging.util.FileMatcher.file;
import static org.elasticsearch.packaging.util.FileMatcher.p644;
import static org.elasticsearch.packaging.util.FileMatcher.p660;
import static org.elasticsearch.packaging.util.FileMatcher.p755;
import static org.elasticsearch.packaging.util.FileUtils.getCurrentVersion;
import static org.elasticsearch.packaging.util.FileUtils.getDefaultArchiveInstallPath;
import static org.elasticsearch.packaging.util.FileUtils.getPackagingArchivesDir;
import static org.elasticsearch.packaging.util.FileUtils.lsGlob;

import static org.elasticsearch.packaging.util.FileUtils.mv;
import static org.elasticsearch.packaging.util.Platforms.isDPKG;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.collection.IsEmptyCollection.empty;
import static org.hamcrest.collection.IsCollectionWithSize.hasSize;

/**
 * Installation and verification logic for archive distributions
 */
public class Archives {

    public static Installation installArchive(Distribution distribution) {
        return installArchive(distribution, getDefaultArchiveInstallPath(), getCurrentVersion());
    }

    public static Installation installArchive(Distribution distribution, Path fullInstallPath, String version) {
        final Shell sh = new Shell();

        final Path distributionFile = getPackagingArchivesDir().resolve(distribution.filename(version));
        final Path baseInstallPath = fullInstallPath.getParent();
        final Path extractedPath = baseInstallPath.resolve("elasticsearch-" + version);

        assertThat("distribution file must exist", Files.exists(distributionFile), is(true));
        assertThat("elasticsearch must not already be installed", lsGlob(baseInstallPath, "elasticsearch*"), empty());

        if (distribution.packaging == Distribution.Packaging.TAR) {

            if (Platforms.LINUX) {
                sh.run("tar", "-C", baseInstallPath.toString(), "-xzpf", distributionFile.toString());
            } else {
                throw new RuntimeException("Distribution " + distribution + " is not supported on windows");
            }

        } else if (distribution.packaging == Distribution.Packaging.ZIP) {

            if (Platforms.LINUX) {
                sh.run("unzip", distributionFile.toString(), "-d", baseInstallPath.toString());
            } else {
                sh.run("powershell.exe", "-Command",
                    "Add-Type -AssemblyName 'System.IO.Compression.Filesystem'; " +
                    "[IO.Compression.ZipFile]::ExtractToDirectory('" + distributionFile + "', '" + baseInstallPath + "')");
            }

        } else {
            throw new RuntimeException("Distribution " + distribution + " is not a known archive type");
        }

        assertThat("archive was extracted", Files.exists(extractedPath), is(true));

        mv(extractedPath, fullInstallPath);

        assertThat("extracted archive moved to install location", Files.exists(fullInstallPath));
        final List<Path> installations = lsGlob(baseInstallPath, "elasticsearch*");
        assertThat("only the intended installation exists", installations, hasSize(1));
        assertThat("only the intended installation exists", installations.get(0), is(fullInstallPath));

        if (Platforms.LINUX) {
            setupArchiveUsersLinux(fullInstallPath);
        }

        return new Installation(fullInstallPath);
    }

    private static void setupArchiveUsersLinux(Path installPath) {
        final Shell sh = new Shell();

        if (sh.runIgnoreExitCode("getent", "group", "elasticsearch").isSuccess() == false) {
            if (isDPKG()) {
                sh.run("addgroup", "--system", "elasticsearch");
            } else {
                sh.run("groupadd", "-r", "elasticsearch");
            }
        }

        if (sh.runIgnoreExitCode("id", "elasticsearch").isSuccess() == false) {
            if (isDPKG()) {
                sh.run("adduser",
                    "--quiet",
                    "--system",
                    "--no-create-home",
                    "--ingroup", "elasticsearch",
                    "--disabled-password",
                    "--shell", "/bin/false",
                    "elasticsearch");
            } else {
                sh.run("useradd",
                    "--system",
                    "-M",
                    "--gid", "elasticsearch",
                    "--shell", "/sbin/nologin",
                    "--comment", "elasticsearch user",
                    "elasticsearch");
            }
        }
        sh.run("chown", "-R", "elasticsearch:elasticsearch", installPath.toString());
    }

    public static void verifyArchiveInstallation(Installation installation, Distribution distribution) {
        // on Windows for now we leave the installation owned by the vagrant user that the tests run as. Since the vagrant account
        // is a local administrator, the files really end up being owned by the local administrators group. In the future we'll
        // install and run elasticsearch with a role user on Windows
        final String owner = Platforms.WINDOWS
            ? "BUILTIN\\Administrators"
            : "elasticsearch";

        verifyOssInstallation(installation, distribution, owner);
        if (distribution.flavor == Distribution.Flavor.DEFAULT) {
            verifyDefaultInstallation(installation, distribution, owner);
        }
    }

    private static void verifyOssInstallation(Installation es, Distribution distribution, String owner) {
        Stream.of(
            es.home,
            es.config,
            es.plugins,
            es.modules,
            es.logs
        ).forEach(dir -> assertThat(dir, file(Directory, owner, owner, p755)));

        assertThat(Files.exists(es.data), is(false));
        assertThat(Files.exists(es.scripts), is(false));

        assertThat(es.home.resolve("bin"), file(Directory, owner, owner, p755));
        assertThat(es.home.resolve("lib"), file(Directory, owner, owner, p755));
        assertThat(Files.exists(es.config.resolve("elasticsearch.keystore")), is(false));

        Stream.of(
            "bin/elasticsearch",
            "bin/elasticsearch-env",
            "bin/elasticsearch-keystore",
            "bin/elasticsearch-plugin",
            "bin/elasticsearch-translog"
        ).forEach(executable -> {

            assertThat(es.home.resolve(executable), file(File, owner, owner, p755));

            if (distribution.packaging == Distribution.Packaging.ZIP) {
                assertThat(es.home.resolve(executable + ".bat"), file(File, owner));
            }
        });

        if (distribution.packaging == Distribution.Packaging.ZIP) {
            Stream.of(
                "bin/elasticsearch-service.bat",
                "bin/elasticsearch-service-mgr.exe",
                "bin/elasticsearch-service-x64.exe"
            ).forEach(executable -> assertThat(es.home.resolve(executable), file(File, owner)));
        }

        Stream.of(
            "elasticsearch.yml",
            "jvm.options",
            "log4j2.properties"
        ).forEach(config -> assertThat(es.config.resolve(config), file(File, owner, owner, p660)));

        Stream.of(
            "NOTICE.txt",
            "LICENSE.txt",
            "README.textile"
        ).forEach(doc -> assertThat(es.home.resolve(doc), file(File, owner, owner, p644)));
    }

    private static void verifyDefaultInstallation(Installation es, Distribution distribution, String owner) {

        Stream.of(
            "bin/elasticsearch-certgen",
            "bin/elasticsearch-certutil",
            "bin/elasticsearch-croneval",
            "bin/elasticsearch-migrate",
            "bin/elasticsearch-saml-metadata",
            "bin/elasticsearch-setup-passwords",
            "bin/elasticsearch-sql-cli",
            "bin/elasticsearch-syskeygen",
            "bin/elasticsearch-users",
            "bin/x-pack-env",
            "bin/x-pack-security-env",
            "bin/x-pack-watcher-env"
        ).forEach(executable -> {

            assertThat(es.home.resolve(executable), file(File, owner, owner, p755));

            if (distribution.packaging == Distribution.Packaging.ZIP) {
                assertThat(es.home.resolve(executable + ".bat"), file(File, owner));
            }
        });

        // at this time we only install the current version of archive distributions, but if that changes we'll need to pass
        // the version through here
        assertThat(es.home.resolve("bin/elasticsearch-sql-cli-" + getCurrentVersion() + ".jar"), file(File, owner, owner, p755));

        Stream.of(
            "users",
            "users_roles",
            "roles.yml",
            "role_mapping.yml",
            "log4j2.properties"
        ).forEach(config -> assertThat(es.config.resolve(config), file(File, owner, owner, p660)));
    }

}
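Usage mirrors test10Install in ArchiveTestCase above; the explicit overload additionally lets a caller choose the destination and version (the path and version below are illustrative):

    Installation installation = Archives.installArchive(Distribution.OSS_TAR);
    Archives.verifyArchiveInstallation(installation, Distribution.OSS_TAR);

    // Or, with an explicit install path and version:
    Installation custom = Archives.installArchive(
        Distribution.DEFAULT_ZIP, Paths.get("/tmp/elasticsearch-custom"), "6.4.0");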
@@ -0,0 +1,121 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.util;

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static org.elasticsearch.packaging.util.FileUtils.getTempDir;
import static org.elasticsearch.packaging.util.FileUtils.lsGlob;
import static org.elasticsearch.packaging.util.Platforms.isAptGet;
import static org.elasticsearch.packaging.util.Platforms.isDPKG;
import static org.elasticsearch.packaging.util.Platforms.isRPM;
import static org.elasticsearch.packaging.util.Platforms.isSystemd;
import static org.elasticsearch.packaging.util.Platforms.isYUM;

public class Cleanup {

    private static final List<String> ELASTICSEARCH_FILES_LINUX = Arrays.asList(
        "/usr/share/elasticsearch",
        "/etc/elasticsearch",
        "/var/lib/elasticsearch",
        "/var/log/elasticsearch",
        "/etc/default/elasticsearch",
        "/etc/sysconfig/elasticsearch",
        "/var/run/elasticsearch",
        "/usr/share/doc/elasticsearch",
        "/usr/lib/systemd/system/elasticsearch.conf",
        "/usr/lib/tmpfiles.d/elasticsearch.conf",
        "/usr/lib/sysctl.d/elasticsearch.conf"
    );

    // todo
    private static final List<String> ELASTICSEARCH_FILES_WINDOWS = Collections.emptyList();

    public static void cleanEverything() {
        final Shell sh = new Shell();

        // kill elasticsearch processes
        if (Platforms.WINDOWS) {

            // the view of processes returned by Get-Process doesn't expose command line arguments, so we use WMI here
            sh.runIgnoreExitCode("powershell.exe", "-Command",
                "Get-WmiObject Win32_Process | " +
                "Where-Object { $_.CommandLine -Match 'org.elasticsearch.bootstrap.Elasticsearch' } | " +
                "ForEach-Object { $_.Terminate() }");

        } else {

            sh.runIgnoreExitCode("pkill", "-u", "elasticsearch");
            sh.runIgnoreExitCode("bash", "-c",
                "ps aux | grep -i 'org.elasticsearch.bootstrap.Elasticsearch' | awk {'print $2'} | xargs kill -9");

        }

        if (Platforms.LINUX) {
            purgePackagesLinux();
        }

        // remove elasticsearch users
        if (Platforms.LINUX) {
            sh.runIgnoreExitCode("userdel", "elasticsearch");
            sh.runIgnoreExitCode("groupdel", "elasticsearch");
        }

        // delete files that may still exist
        lsGlob(getTempDir(), "elasticsearch*").forEach(FileUtils::rm);
        final List<String> filesToDelete = Platforms.WINDOWS
            ? ELASTICSEARCH_FILES_WINDOWS
            : ELASTICSEARCH_FILES_LINUX;
        filesToDelete.stream()
            .map(Paths::get)
            .filter(Files::exists)
            .forEach(FileUtils::rm);

        // disable elasticsearch service
        // todo add this for windows when adding tests for service installation
        if (Platforms.LINUX && isSystemd()) {
            sh.run("systemctl", "unmask", "systemd-sysctl.service");
        }
    }

    private static void purgePackagesLinux() {
        final Shell sh = new Shell();

        if (isRPM()) {
            sh.runIgnoreExitCode("rpm", "--quiet", "-e", "elasticsearch", "elasticsearch-oss");
        }

        if (isYUM()) {
            sh.runIgnoreExitCode("yum", "remove", "-y", "elasticsearch", "elasticsearch-oss");
        }

        if (isDPKG()) {
            sh.runIgnoreExitCode("dpkg", "--purge", "elasticsearch", "elasticsearch-oss");
        }

        if (isAptGet()) {
            sh.runIgnoreExitCode("apt-get", "--quiet", "--yes", "purge", "elasticsearch", "elasticsearch-oss");
        }
    }
}
@@ -0,0 +1,76 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.util;

public enum Distribution {

    OSS_TAR(Packaging.TAR, Flavor.OSS),
    OSS_ZIP(Packaging.ZIP, Flavor.OSS),
    OSS_DEB(Packaging.DEB, Flavor.OSS),
    OSS_RPM(Packaging.RPM, Flavor.OSS),

    DEFAULT_TAR(Packaging.TAR, Flavor.DEFAULT),
    DEFAULT_ZIP(Packaging.ZIP, Flavor.DEFAULT),
    DEFAULT_DEB(Packaging.DEB, Flavor.DEFAULT),
    DEFAULT_RPM(Packaging.RPM, Flavor.DEFAULT);

    public final Packaging packaging;
    public final Flavor flavor;

    Distribution(Packaging packaging, Flavor flavor) {
        this.packaging = packaging;
        this.flavor = flavor;
    }

    public String filename(String version) {
        return flavor.name + "-" + version + packaging.extension;
    }

    public enum Packaging {

        TAR(".tar.gz", Platforms.LINUX),
        ZIP(".zip", true),
        DEB(".deb", Platforms.isDPKG()),
        RPM(".rpm", Platforms.isRPM());

        /** The extension of this distribution's file */
        public final String extension;

        /** Whether the distribution is intended for use on the platform the current JVM is running on */
        public final boolean compatible;

        Packaging(String extension, boolean compatible) {
            this.extension = extension;
            this.compatible = compatible;
        }
    }

    public enum Flavor {

        OSS("elasticsearch-oss"),
        DEFAULT("elasticsearch");

        public final String name;

        Flavor(String name) {
            this.name = name;
        }
    }
}
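filename() is pure string concatenation of flavor, version and extension; for an illustrative version string:

    // Illustrative values; the real version comes from FileUtils.getCurrentVersion().
    Distribution.DEFAULT_TAR.filename("6.4.0");  // "elasticsearch-6.4.0.tar.gz"
    Distribution.OSS_ZIP.filename("6.4.0");      // "elasticsearch-oss-6.4.0.zip"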
@@ -0,0 +1,137 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.util;

import org.hamcrest.Description;
import org.hamcrest.TypeSafeMatcher;

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.util.Objects;
import java.util.Set;

import static org.elasticsearch.packaging.util.FileUtils.getBasicFileAttributes;
import static org.elasticsearch.packaging.util.FileUtils.getFileOwner;
import static org.elasticsearch.packaging.util.FileUtils.getPosixFileAttributes;

import static java.nio.file.attribute.PosixFilePermissions.fromString;

/**
 * Asserts that a file at a path matches its status as Directory/File, and its owner. If on a posix system, also checks that the
 * permission set is what we expect.
 *
 * This class saves information about its failed matches in instance variables and so instances should not be reused
 */
public class FileMatcher extends TypeSafeMatcher<Path> {

    public enum Fileness { File, Directory }

    public static final Set<PosixFilePermission> p755 = fromString("rwxr-xr-x");
    public static final Set<PosixFilePermission> p660 = fromString("rw-rw----");
    public static final Set<PosixFilePermission> p644 = fromString("rw-r--r--");

    private final Fileness fileness;
    private final String owner;
    private final String group;
    private final Set<PosixFilePermission> posixPermissions;

    private String mismatch;

    public FileMatcher(Fileness fileness, String owner, String group, Set<PosixFilePermission> posixPermissions) {
        this.fileness = Objects.requireNonNull(fileness);
        this.owner = Objects.requireNonNull(owner);
        this.group = group;
        this.posixPermissions = posixPermissions;
    }

    @Override
    protected boolean matchesSafely(Path path) {
        if (Files.exists(path) == false) {
            mismatch = "Does not exist";
            return false;
        }

        if (Platforms.WINDOWS) {
            final BasicFileAttributes attributes = getBasicFileAttributes(path);
            final String attributeViewOwner = getFileOwner(path);

            if (fileness.equals(Fileness.Directory) != attributes.isDirectory()) {
                mismatch = "Is " + (attributes.isDirectory() ? "a directory" : "a file");
                return false;
            }

            if (attributeViewOwner.contains(owner) == false) {
                mismatch = "Owned by " + attributeViewOwner;
                return false;
            }
        } else {
            final PosixFileAttributes attributes = getPosixFileAttributes(path);

            if (fileness.equals(Fileness.Directory) != attributes.isDirectory()) {
                mismatch = "Is " + (attributes.isDirectory() ? "a directory" : "a file");
                return false;
            }

            if (owner.equals(attributes.owner().getName()) == false) {
                mismatch = "Owned by " + attributes.owner().getName();
                return false;
            }

            if (group != null && group.equals(attributes.group().getName()) == false) {
                mismatch = "Owned by group " + attributes.group().getName();
                return false;
            }

            if (posixPermissions != null && posixPermissions.equals(attributes.permissions()) == false) {
                mismatch = "Has permissions " + attributes.permissions();
                return false;
            }
        }

        return true;
    }

    @Override
    public void describeMismatchSafely(Path path, Description description) {
        description.appendText("path ").appendValue(path);
        if (mismatch != null) {
            description.appendText(mismatch);
        }
    }

    @Override
    public void describeTo(Description description) {
        description.appendValue("file/directory: ").appendValue(fileness)
            .appendText(" with owner ").appendValue(owner)
            .appendText(" with group ").appendValue(group)
            .appendText(" with posix permissions ").appendValueList("[", ",", "]", posixPermissions);
    }

    public static FileMatcher file(Fileness fileness, String owner) {
        return file(fileness, owner, null, null);
    }

    public static FileMatcher file(Fileness fileness, String owner, String group, Set<PosixFilePermission> permissions) {
        return new FileMatcher(fileness, owner, group, permissions);
    }
}
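The static factories keep assertions readable at the call site, as in verifyOssInstallation above:

    // Posix form: directory owned by elasticsearch:elasticsearch with rwxr-xr-x.
    assertThat(es.home.resolve("bin"), file(Directory, "elasticsearch", "elasticsearch", p755));
    // Owner-only form, used where group and permissions are not checked (e.g. on Windows).
    assertThat(es.home.resolve("bin/elasticsearch-service.bat"), file(File, "BUILTIN\\Administrators"));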
@@ -0,0 +1,134 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.util;

import org.elasticsearch.core.internal.io.IOUtils;

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.FileOwnerAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
import java.util.ArrayList;
import java.util.List;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.IsNot.not;
import static org.hamcrest.text.IsEmptyString.isEmptyOrNullString;

/**
 * Wrappers and convenience methods for common filesystem operations
 */
public class FileUtils {

    public static List<Path> lsGlob(Path directory, String glob) {
        List<Path> paths = new ArrayList<>();
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(directory, glob)) {

            for (Path path : stream) {
                paths.add(path);
            }
            return paths;

        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    public static void rm(Path... paths) {
        try {
            IOUtils.rm(paths);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    public static Path mv(Path source, Path target) {
        try {
            return Files.move(source, target);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    public static String slurp(Path file) {
        try {
            return String.join("\n", Files.readAllLines(file));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Gets the owner of a file in a way that should be supported by all filesystems that have a concept of file owner
     */
    public static String getFileOwner(Path path) {
        try {
            FileOwnerAttributeView view = Files.getFileAttributeView(path, FileOwnerAttributeView.class);
            return view.getOwner().getName();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Gets attributes that are supported by all filesystems
     */
    public static BasicFileAttributes getBasicFileAttributes(Path path) {
        try {
            return Files.readAttributes(path, BasicFileAttributes.class);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Gets attributes that are supported by posix filesystems
     */
    public static PosixFileAttributes getPosixFileAttributes(Path path) {
        try {
            return Files.readAttributes(path, PosixFileAttributes.class);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    // vagrant creates /tmp for us in windows so we use that to avoid long paths
    public static Path getTempDir() {
        return Paths.get("/tmp");
    }

    public static Path getDefaultArchiveInstallPath() {
        return getTempDir().resolve("elasticsearch");
    }

    public static String getCurrentVersion() {
        return slurp(getPackagingArchivesDir().resolve("version"));
    }

    public static Path getPackagingArchivesDir() {
        String fromEnv = System.getenv("PACKAGING_ARCHIVES");
        assertThat(fromEnv, not(isEmptyOrNullString()));
        return Paths.get(fromEnv);
    }
}
@@ -0,0 +1,58 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.util;

import java.nio.file.Path;

/**
 * Represents an installation of Elasticsearch
 */
public class Installation {

    public final Path home;
    public final Path config;
    public final Path data;
    public final Path logs;
    public final Path plugins;
    public final Path modules;
    public final Path scripts;

    public Installation(Path home, Path config, Path data, Path logs, Path plugins, Path modules, Path scripts) {
        this.home = home;
        this.config = config;
        this.data = data;
        this.logs = logs;
        this.plugins = plugins;
        this.modules = modules;
        this.scripts = scripts;
    }

    public Installation(Path home) {
        this(
            home,
            home.resolve("config"),
            home.resolve("data"),
            home.resolve("logs"),
            home.resolve("plugins"),
            home.resolve("modules"),
            home.resolve("scripts")
        );
    }
}
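A short sketch of the single-argument constructor in use (the install location is an assumption based on getDefaultArchiveInstallPath() above): every layout path defaults to a subdirectory of the home directory.

    Installation installation = new Installation(Paths.get("/tmp/elasticsearch"));
    assert installation.config.equals(installation.home.resolve("config"));
    assert installation.plugins.equals(installation.home.resolve("plugins"));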
@@ -0,0 +1,68 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.util;

public class Platforms {
    public static final String OS_NAME = System.getProperty("os.name");
    public static final boolean LINUX = OS_NAME.startsWith("Linux");
    public static final boolean WINDOWS = OS_NAME.startsWith("Windows");

    public static boolean isDPKG() {
        if (WINDOWS) {
            return false;
        }
        return new Shell().runIgnoreExitCode("which", "dpkg").isSuccess();
    }

    public static boolean isAptGet() {
        if (WINDOWS) {
            return false;
        }
        return new Shell().runIgnoreExitCode("which", "apt-get").isSuccess();
    }

    public static boolean isRPM() {
        if (WINDOWS) {
            return false;
        }
        return new Shell().runIgnoreExitCode("which", "rpm").isSuccess();
    }

    public static boolean isYUM() {
        if (WINDOWS) {
            return false;
        }
        return new Shell().runIgnoreExitCode("which", "yum").isSuccess();
    }

    public static boolean isSystemd() {
        if (WINDOWS) {
            return false;
        }
        return new Shell().runIgnoreExitCode("which", "systemctl").isSuccess();
    }

    public static boolean isSysVInit() {
        if (WINDOWS) {
            return false;
        }
        return new Shell().runIgnoreExitCode("which", "service").isSuccess();
    }
}
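These checks shell out to `which`, so each call costs a subprocess. A hedged sketch of how a packaging test might branch on them (the commands are illustrative):

    // Hypothetical guard: only exercise package-manager assertions where the tool exists.
    if (Platforms.isDPKG()) {
        new Shell().run("dpkg", "--version");
    } else if (Platforms.isRPM()) {
        new Shell().run("rpm", "--version");
    }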
@@ -0,0 +1,193 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging.util;

import org.elasticsearch.common.SuppressForbidden;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

import static java.util.Collections.emptyMap;

/**
 * Wrapper to run shell commands and collect their outputs in a less verbose way
 */
public class Shell {

    final Map<String, String> env;
    final Path workingDirectory;

    public Shell() {
        this(emptyMap(), null);
    }

    public Shell(Map<String, String> env) {
        this(env, null);
    }

    public Shell(Path workingDirectory) {
        this(emptyMap(), workingDirectory);
    }

    public Shell(Map<String, String> env, Path workingDirectory) {
        this.env = new HashMap<>(env);
        this.workingDirectory = workingDirectory;
    }

    public Result run(String... command) {
        Result result = runIgnoreExitCode(command);
        if (result.isSuccess() == false) {
            throw new RuntimeException("Command was not successful: [" + String.join(" ", command) + "] result: " + result.toString());
        }
        return result;
    }

    public Result runIgnoreExitCode(String... command) {
        ProcessBuilder builder = new ProcessBuilder();
        builder.command(command);

        if (workingDirectory != null) {
            setWorkingDirectory(builder, workingDirectory);
        }

        if (env != null && env.isEmpty() == false) {
            for (Map.Entry<String, String> entry : env.entrySet()) {
                builder.environment().put(entry.getKey(), entry.getValue());
            }
        }

        try {

            Process process = builder.start();

            StringBuilder stdout = new StringBuilder();
            StringBuilder stderr = new StringBuilder();

            Thread stdoutThread = new Thread(new StreamCollector(process.getInputStream(), stdout));
            Thread stderrThread = new Thread(new StreamCollector(process.getErrorStream(), stderr));

            stdoutThread.start();
            stderrThread.start();

            stdoutThread.join();
            stderrThread.join();

            int exitCode = process.waitFor();

            return new Result(exitCode, stdout.toString(), stderr.toString());

        } catch (IOException | InterruptedException e) {
            throw new RuntimeException(e);
        }
    }

    @SuppressForbidden(reason = "ProcessBuilder expects java.io.File")
    private static void setWorkingDirectory(ProcessBuilder builder, Path path) {
        builder.directory(path.toFile());
    }

    public String toString() {
        return new StringBuilder()
            .append("<")
            .append(this.getClass().getName())
            .append(" ")
            .append("env = [")
            .append(env)
            .append("]")
            .append("workingDirectory = [")
            .append(workingDirectory)
            .append("]")
            .append(">")
            .toString();
    }

    public static class Result {
        public final int exitCode;
        public final String stdout;
        public final String stderr;

        public Result(int exitCode, String stdout, String stderr) {
            this.exitCode = exitCode;
            this.stdout = stdout;
            this.stderr = stderr;
        }

        public boolean isSuccess() {
            return exitCode == 0;
        }

        public String toString() {
            return new StringBuilder()
                .append("<")
                .append(this.getClass().getName())
                .append(" ")
                .append("exitCode = [")
                .append(exitCode)
                .append("]")
                .append(" ")
                .append("stdout = [")
                .append(stdout)
                .append("]")
                .append(" ")
                .append("stderr = [")
                .append(stderr)
                .append("]")
                .append(">")
                .toString();
        }
    }

    private static class StreamCollector implements Runnable {
        private final InputStream input;
        private final Appendable appendable;

        StreamCollector(InputStream input, Appendable appendable) {
            this.input = Objects.requireNonNull(input);
            this.appendable = Objects.requireNonNull(appendable);
        }

        public void run() {
            try {

                BufferedReader reader = new BufferedReader(reader(input));
                String line;

                while ((line = reader.readLine()) != null) {
                    appendable.append(line);
                    appendable.append("\n");
                }

            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }

        @SuppressForbidden(reason = "the system's default character set is a best guess of what subprocesses will use")
        private static InputStreamReader reader(InputStream inputStream) {
            return new InputStreamReader(inputStream);
        }
    }
}
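A minimal usage sketch (the command lines are illustrative): run() is the fail-fast variant, while runIgnoreExitCode() leaves exit-code handling to the caller, and both collect stdout and stderr on separate threads to avoid pipe-buffer deadlock.

    Shell sh = new Shell(Paths.get("/tmp"));
    Shell.Result result = sh.run("ls", "-la");           // throws RuntimeException on non-zero exit
    String listing = result.stdout;

    Shell.Result probe = sh.runIgnoreExitCode("which", "dpkg");
    boolean hasDpkg = probe.isSuccess();                  // true iff exitCode == 0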
@@ -19,6 +19,7 @@

package org.elasticsearch.client.transport;

import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;

@@ -129,7 +130,8 @@ public abstract class TransportClient extends AbstractClient {
            providedSettings = Settings.builder().put(providedSettings).put(Node.NODE_NAME_SETTING.getKey(), "_client_").build();
        }
        final PluginsService pluginsService = newPluginService(providedSettings, plugins);
        final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).build();
        final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).put(ThreadContext.PREFIX
            + "." + "transport_client", true).build();
        final List<Closeable> resourcesToClose = new ArrayList<>();
        final ThreadPool threadPool = new ThreadPool(settings);
        resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
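The net effect of the added .put(ThreadContext.PREFIX + "." + "transport_client", true) is a default "transport_client" header on every thread context created by the client; the TransportClientTests hunk further down asserts exactly this. A sketch mirroring that assertion:

    // Mirrors the check in TransportClientTests below (MockTransportClient as used there).
    try (TransportClient client = new MockTransportClient(settings)) {
        assertEquals("true", client.threadPool().getThreadContext().getHeader("transport_client"));
    }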
@@ -69,8 +69,11 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.ingest.IngestMetadata;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksNodeService;
import org.elasticsearch.plugins.ClusterPlugin;
import org.elasticsearch.script.ScriptMetaData;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskResultsService;

import java.util.ArrayList;

@@ -140,6 +143,10 @@ public class ClusterModule extends AbstractModule {
        registerMetaDataCustom(entries, IngestMetadata.TYPE, IngestMetadata::new, IngestMetadata::readDiffFrom);
        registerMetaDataCustom(entries, ScriptMetaData.TYPE, ScriptMetaData::new, ScriptMetaData::readDiffFrom);
        registerMetaDataCustom(entries, IndexGraveyard.TYPE, IndexGraveyard::new, IndexGraveyard::readDiffFrom);
        registerMetaDataCustom(entries, PersistentTasksCustomMetaData.TYPE, PersistentTasksCustomMetaData::new,
            PersistentTasksCustomMetaData::readDiffFrom);
        // Task Status (not Diffable)
        entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new));
        return entries;
    }

@@ -154,6 +161,8 @@ public class ClusterModule extends AbstractModule {
            ScriptMetaData::fromXContent));
        entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IndexGraveyard.TYPE),
            IndexGraveyard::fromXContent));
        entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(PersistentTasksCustomMetaData.TYPE),
            PersistentTasksCustomMetaData::fromXContent));
        return entries;
    }

@@ -20,7 +20,6 @@
package org.elasticsearch.cluster.service;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskListener;

import java.util.function.Supplier;

@@ -38,11 +37,29 @@ public interface ClusterApplier {
     * @param clusterStateSupplier the cluster state supplier which provides the latest cluster state to apply
     * @param listener callback that is invoked after cluster state is applied
     */
    void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier, ClusterStateTaskListener listener);
    void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier, ClusterApplyListener listener);

    /**
     * Creates a new cluster state builder that is initialized with the cluster name and all initial cluster state customs.
     */
    ClusterState.Builder newClusterStateBuilder();

    /**
     * Listener for results of cluster state application
     */
    interface ClusterApplyListener {
        /**
         * Called on successful cluster state application
         * @param source information where the cluster state came from
         */
        default void onSuccess(String source) {
        }

        /**
         * Called on failure during cluster state application
         * @param source information where the cluster state came from
         * @param e exception that occurred
         */
        void onFailure(String source, Exception e);
    }
}
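Since onSuccess(String) has a default no-op body, implementers of the new listener only have to provide onFailure. A sketch of the latch-based pattern the tests further down use (the source string and state supplier are placeholders):

    // Block until the supplied state has been applied, or application has failed.
    CountDownLatch latch = new CountDownLatch(1);
    clusterApplier.onNewClusterState("example", () -> newState, new ClusterApplyListener() {
        @Override
        public void onSuccess(String source) {
            latch.countDown();
        }

        @Override
        public void onFailure(String source, Exception e) {
            latch.countDown();
        }
    });
    latch.await();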
@@ -27,7 +27,6 @@ import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.TimeoutClusterStateListener;

@@ -141,10 +140,10 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
    }

    class UpdateTask extends SourcePrioritizedRunnable implements Function<ClusterState, ClusterState> {
        final ClusterStateTaskListener listener;
        final ClusterApplyListener listener;
        final Function<ClusterState, ClusterState> updateFunction;

        UpdateTask(Priority priority, String source, ClusterStateTaskListener listener,
        UpdateTask(Priority priority, String source, ClusterApplyListener listener,
                   Function<ClusterState, ClusterState> updateFunction) {
            super(priority, source);
            this.listener = listener;

@@ -301,7 +300,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
    }

    public void runOnApplierThread(final String source, Consumer<ClusterState> clusterStateConsumer,
                                   final ClusterStateTaskListener listener, Priority priority) {
                                   final ClusterApplyListener listener, Priority priority) {
        submitStateUpdateTask(source, ClusterStateTaskConfig.build(priority),
            (clusterState) -> {
                clusterStateConsumer.accept(clusterState);

@@ -311,13 +310,13 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
    }

    public void runOnApplierThread(final String source, Consumer<ClusterState> clusterStateConsumer,
                                   final ClusterStateTaskListener listener) {
                                   final ClusterApplyListener listener) {
        runOnApplierThread(source, clusterStateConsumer, listener, Priority.HIGH);
    }

    @Override
    public void onNewClusterState(final String source, final Supplier<ClusterState> clusterStateSupplier,
                                  final ClusterStateTaskListener listener) {
                                  final ClusterApplyListener listener) {
        Function<ClusterState, ClusterState> applyFunction = currentState -> {
            ClusterState nextState = clusterStateSupplier.get();
            if (nextState != null) {

@@ -331,12 +330,12 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements

    private void submitStateUpdateTask(final String source, final ClusterStateTaskConfig config,
                                       final Function<ClusterState, ClusterState> executor,
                                       final ClusterStateTaskListener listener) {
                                       final ClusterApplyListener listener) {
        if (!lifecycle.started()) {
            return;
        }
        try {
            UpdateTask updateTask = new UpdateTask(config.priority(), source, new SafeClusterStateTaskListener(listener, logger), executor);
            UpdateTask updateTask = new UpdateTask(config.priority(), source, new SafeClusterApplyListener(listener, logger), executor);
            if (config.timeout() != null) {
                threadPoolExecutor.execute(updateTask, config.timeout(),
                    () -> threadPool.generic().execute(

@@ -417,7 +416,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
        }

        if (previousClusterState == newClusterState) {
            task.listener.clusterStateProcessed(task.source, newClusterState, newClusterState);
            task.listener.onSuccess(task.source);
            TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
            logger.debug("processing [{}]: took [{}] no change in cluster state", task.source, executionTime);
            warnAboutSlowTaskIfNeeded(executionTime, task.source);

@@ -486,7 +485,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements

        callClusterStateListeners(clusterChangedEvent);

        task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
        task.listener.onSuccess(task.source);
    }

    private void callClusterStateAppliers(ClusterChangedEvent clusterChangedEvent) {

@@ -511,11 +510,11 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
        });
    }

    private static class SafeClusterStateTaskListener implements ClusterStateTaskListener {
        private final ClusterStateTaskListener listener;
    private static class SafeClusterApplyListener implements ClusterApplyListener {
        private final ClusterApplyListener listener;
        private final Logger logger;

        SafeClusterStateTaskListener(ClusterStateTaskListener listener, Logger logger) {
        SafeClusterApplyListener(ClusterApplyListener listener, Logger logger) {
            this.listener = listener;
            this.logger = logger;
        }

@@ -532,14 +531,12 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
        public void onSuccess(String source) {
            try {
                listener.clusterStateProcessed(source, oldState, newState);
                listener.onSuccess(source);
            } catch (Exception e) {
                logger.error(new ParameterizedMessage(
                    "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
                        "{}\nnew cluster state:\n{}",
                    source, oldState, newState), e);
                    "exception thrown by listener while notifying of cluster state processed from [{}]", source), e);
            }
        }
    }

@@ -227,7 +227,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
        HttpTransportSettings.SETTING_CORS_ENABLED,
        HttpTransportSettings.SETTING_CORS_MAX_AGE,
        HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
        HttpTransportSettings.SETTING_PIPELINING,
        HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
        HttpTransportSettings.SETTING_HTTP_HOST,
        HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST,

@@ -22,18 +22,16 @@ package org.elasticsearch.discovery.single;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterApplier;
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.discovery.zen.PendingClusterStateStats;
import org.elasticsearch.discovery.zen.PublishClusterStateStats;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

@@ -65,9 +63,9 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D
        clusterState = event.state();
        CountDownLatch latch = new CountDownLatch(1);

        ClusterStateTaskListener listener = new ClusterStateTaskListener() {
        ClusterApplyListener listener = new ClusterApplyListener() {
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            public void onSuccess(String source) {
                latch.countDown();
                ackListener.onNodeAck(transportService.getLocalNode(), null);
            }

@@ -21,7 +21,6 @@ package org.elasticsearch.discovery.zen;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;

@@ -34,12 +33,11 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.service.ClusterApplier;
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;

@@ -789,9 +787,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

        clusterApplier.onNewClusterState("apply cluster state (from master [" + reason + "])",
            this::clusterState,
            new ClusterStateTaskListener() {
            new ClusterApplyListener() {
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                public void onSuccess(String source) {
                    try {
                        pendingStatesQueue.markAsProcessed(newClusterState);
                    } catch (Exception e) {

@@ -29,9 +29,11 @@ public class HttpHandlingSettings {
    private final boolean compression;
    private final int compressionLevel;
    private final boolean detailedErrorsEnabled;
    private final int pipeliningMaxEvents;

    public HttpHandlingSettings(int maxContentLength, int maxChunkSize, int maxHeaderSize, int maxInitialLineLength,
                                boolean resetCookies, boolean compression, int compressionLevel, boolean detailedErrorsEnabled) {
                                boolean resetCookies, boolean compression, int compressionLevel, boolean detailedErrorsEnabled,
                                int pipeliningMaxEvents) {
        this.maxContentLength = maxContentLength;
        this.maxChunkSize = maxChunkSize;
        this.maxHeaderSize = maxHeaderSize;

@@ -40,6 +42,7 @@ public class HttpHandlingSettings {
        this.compression = compression;
        this.compressionLevel = compressionLevel;
        this.detailedErrorsEnabled = detailedErrorsEnabled;
        this.pipeliningMaxEvents = pipeliningMaxEvents;
    }

    public int getMaxContentLength() {

@@ -73,4 +76,8 @@ public class HttpHandlingSettings {
    public boolean getDetailedErrorsEnabled() {
        return detailedErrorsEnabled;
    }

    public int getPipeliningMaxEvents() {
        return pipeliningMaxEvents;
    }
}
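A hedged construction sketch showing the widened constructor; the numeric values below are illustrative defaults chosen for the example, not values taken from the change:

    HttpHandlingSettings settings = new HttpHandlingSettings(
        100 * 1024 * 1024,  // maxContentLength
        8192,               // maxChunkSize
        8192,               // maxHeaderSize
        4096,               // maxInitialLineLength
        true,               // resetCookies
        false,              // compression
        3,                  // compressionLevel
        true,               // detailedErrorsEnabled
        10000);             // pipeliningMaxEvents, typically sourced from SETTING_PIPELINING_MAX_EVENTS
    assert settings.getPipeliningMaxEvents() == 10000;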
@@ -0,0 +1,37 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.http;

public class HttpPipelinedMessage implements Comparable<HttpPipelinedMessage> {

    private final int sequence;

    public HttpPipelinedMessage(int sequence) {
        this.sequence = sequence;
    }

    public int getSequence() {
        return sequence;
    }

    @Override
    public int compareTo(HttpPipelinedMessage o) {
        return Integer.compare(sequence, o.sequence);
    }
}
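The natural ordering by sequence number is what lets a priority queue (as in the aggregator below) release responses in request order; a tiny sketch:

    PriorityQueue<HttpPipelinedMessage> queue = new PriorityQueue<>();
    queue.add(new HttpPipelinedMessage(2));
    queue.add(new HttpPipelinedMessage(0));
    assert queue.poll().getSequence() == 0;  // lowest sequence number first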
@@ -0,0 +1,33 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.http;

public class HttpPipelinedRequest<R> extends HttpPipelinedMessage {

    private final R request;

    HttpPipelinedRequest(int sequence, R request) {
        super(sequence);
        this.request = request;
    }

    public R getRequest() {
        return request;
    }
}

@@ -0,0 +1,81 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.http;

import org.elasticsearch.common.collect.Tuple;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class HttpPipeliningAggregator<Response extends HttpPipelinedMessage, Listener> {

    private final int maxEventsHeld;
    private final PriorityQueue<Tuple<Response, Listener>> outboundHoldingQueue;
    /*
     * The current read and write sequence numbers. Read sequence numbers are attached to requests in the order they are read from the
     * channel, and then transferred to responses. A response is not written to the channel context until its sequence number matches the
     * current write sequence, implying that all preceding messages have been written.
     */
    private int readSequence;
    private int writeSequence;

    public HttpPipeliningAggregator(int maxEventsHeld) {
        this.maxEventsHeld = maxEventsHeld;
        this.outboundHoldingQueue = new PriorityQueue<>(1, Comparator.comparing(Tuple::v1));
    }

    public <Request> HttpPipelinedRequest<Request> read(final Request request) {
        return new HttpPipelinedRequest<>(readSequence++, request);
    }

    public List<Tuple<Response, Listener>> write(final Response response, Listener listener) {
        if (outboundHoldingQueue.size() < maxEventsHeld) {
            ArrayList<Tuple<Response, Listener>> readyResponses = new ArrayList<>();
            outboundHoldingQueue.add(new Tuple<>(response, listener));
            while (!outboundHoldingQueue.isEmpty()) {
                /*
                 * Since the response with the lowest sequence number is the top of the priority queue, we know if its sequence
                 * number does not match the current write sequence number then we have not processed all preceding responses yet.
                 */
                final Tuple<Response, Listener> top = outboundHoldingQueue.peek();

                if (top.v1().getSequence() != writeSequence) {
                    break;
                }
                outboundHoldingQueue.poll();
                readyResponses.add(top);
                writeSequence++;
            }

            return readyResponses;
        } else {
            int eventCount = outboundHoldingQueue.size() + 1;
            throw new IllegalStateException("Too many pipelined events [" + eventCount + "]. Max events allowed ["
                + maxEventsHeld + "].");
        }
    }

    public List<Tuple<Response, Listener>> removeAllInflightResponses() {
        ArrayList<Tuple<Response, Listener>> responses = new ArrayList<>(outboundHoldingQueue);
        outboundHoldingQueue.clear();
        return responses;
    }
}
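A rough sketch of the reordering contract the aggregator enforces (the listener type parameter is reduced to Object here purely for brevity):

    HttpPipeliningAggregator<HttpPipelinedMessage, Object> aggregator = new HttpPipeliningAggregator<>(10);
    HttpPipelinedRequest<String> first = aggregator.read("req-0");   // gets sequence 0
    HttpPipelinedRequest<String> second = aggregator.read("req-1");  // gets sequence 1

    // Completing the later response first yields nothing: its sequence (1) is ahead of
    // the write sequence (0), so it is parked in the priority queue.
    assert aggregator.write(new HttpPipelinedMessage(second.getSequence()), null).isEmpty();

    // Completing the first response releases both, in request order.
    assert aggregator.write(new HttpPipelinedMessage(first.getSequence()), null).size() == 2;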
@@ -49,8 +49,6 @@ public final class HttpTransportSettings {
        new Setting<>("http.cors.allow-headers", "X-Requested-With,Content-Type,Content-Length", (value) -> value, Property.NodeScope);
    public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS =
        Setting.boolSetting("http.cors.allow-credentials", false, Property.NodeScope);
    public static final Setting<Boolean> SETTING_PIPELINING =
        Setting.boolSetting("http.pipelining", true, Property.NodeScope);
    public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS =
        Setting.intSetting("http.pipelining.max_events", 10000, Property.NodeScope);
    public static final Setting<Boolean> SETTING_HTTP_COMPRESSION =

@@ -29,6 +29,7 @@ import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
import org.apache.lucene.spatial.query.SpatialArgs;
import org.apache.lucene.spatial.query.SpatialOperation;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;

@@ -77,6 +78,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
    private static final ParseField SHAPE_TYPE_FIELD = new ParseField("type");
    private static final ParseField SHAPE_INDEX_FIELD = new ParseField("index");
    private static final ParseField SHAPE_PATH_FIELD = new ParseField("path");
    private static final ParseField SHAPE_ROUTING_FIELD = new ParseField("routing");
    private static final ParseField IGNORE_UNMAPPED_FIELD = new ParseField("ignore_unmapped");

    private final String fieldName;

@@ -89,8 +91,10 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
    private final String indexedShapeId;
    private final String indexedShapeType;


    private String indexedShapeIndex = DEFAULT_SHAPE_INDEX_NAME;
    private String indexedShapePath = DEFAULT_SHAPE_FIELD_NAME;
    private String indexedShapeRouting;

    private ShapeRelation relation = DEFAULT_SHAPE_RELATION;

@@ -166,6 +170,11 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
            indexedShapeType = in.readOptionalString();
            indexedShapeIndex = in.readOptionalString();
            indexedShapePath = in.readOptionalString();
            if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
                indexedShapeRouting = in.readOptionalString();
            } else {
                indexedShapeRouting = null;
            }
        }
        relation = ShapeRelation.readFromStream(in);
        strategy = in.readOptionalWriteable(SpatialStrategy::readFromStream);

@@ -188,6 +197,11 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
            out.writeOptionalString(indexedShapeType);
            out.writeOptionalString(indexedShapeIndex);
            out.writeOptionalString(indexedShapePath);
            if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
                out.writeOptionalString(indexedShapeRouting);
            } else if (indexedShapeRouting != null) {
                throw new IllegalStateException("indexed shape routing cannot be serialized to older nodes");
            }
        }
        relation.writeTo(out);
        out.writeOptionalWriteable(strategy);

@@ -285,6 +299,26 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
        return indexedShapePath;
    }

    /**
     * Sets the optional routing to the indexed Shape that will be used in the query
     *
     * @param indexedShapeRouting indexed shape routing
     * @return this
     */
    public GeoShapeQueryBuilder indexedShapeRouting(String indexedShapeRouting) {
        this.indexedShapeRouting = indexedShapeRouting;
        return this;
    }


    /**
     * @return the optional routing to the indexed Shape that will be used in the
     *         Query
     */
    public String indexedShapeRouting() {
        return indexedShapeRouting;
    }

    /**
     * Sets the relation of query shape and indexed shape.
     *

@@ -473,6 +507,9 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
            if (indexedShapePath != null) {
                builder.field(SHAPE_PATH_FIELD.getPreferredName(), indexedShapePath);
            }
            if (indexedShapeRouting != null) {
                builder.field(SHAPE_ROUTING_FIELD.getPreferredName(), indexedShapeRouting);
            }
            builder.endObject();
        }

@@ -498,6 +535,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
        String type = null;
        String index = null;
        String shapePath = null;
        String shapeRouting = null;

        XContentParser.Token token;
        String currentFieldName = null;

@@ -544,6 +582,8 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
                        index = parser.text();
                    } else if (SHAPE_PATH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        shapePath = parser.text();
                    } else if (SHAPE_ROUTING_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        shapeRouting = parser.text();
                    }
                } else {
                    throw new ParsingException(parser.getTokenLocation(), "[" + GeoShapeQueryBuilder.NAME +

@@ -581,6 +621,9 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
        if (shapePath != null) {
            builder.indexedShapePath(shapePath);
        }
        if (shapeRouting != null) {
            builder.indexedShapeRouting(shapeRouting);
        }
        if (shapeRelation != null) {
            builder.relation(shapeRelation);
        }

@@ -602,6 +645,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
            && Objects.equals(indexedShapeIndex, other.indexedShapeIndex)
            && Objects.equals(indexedShapePath, other.indexedShapePath)
            && Objects.equals(indexedShapeType, other.indexedShapeType)
            && Objects.equals(indexedShapeRouting, other.indexedShapeRouting)
            && Objects.equals(relation, other.relation)
            && Objects.equals(shape, other.shape)
            && Objects.equals(supplier, other.supplier)

@@ -612,7 +656,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
    @Override
    protected int doHashCode() {
        return Objects.hash(fieldName, indexedShapeId, indexedShapeIndex,
            indexedShapePath, indexedShapeType, relation, shape, strategy, ignoreUnmapped, supplier);
            indexedShapePath, indexedShapeType, indexedShapeRouting, relation, shape, strategy, ignoreUnmapped, supplier);
    }

    @Override

@@ -629,6 +673,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
        SetOnce<ShapeBuilder> supplier = new SetOnce<>();
        queryRewriteContext.registerAsyncAction((client, listener) -> {
            GetRequest getRequest = new GetRequest(indexedShapeIndex, indexedShapeType, indexedShapeId);
            getRequest.routing(indexedShapeRouting);
            fetch(client, getRequest, indexedShapePath, ActionListener.wrap(builder-> {
                supplier.set(builder);
                listener.onResponse(null);
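A brief sketch of the new builder option from the client side; the field, id, type, index, and routing values below mirror the integration test near the end of this diff:

    GeoShapeQueryBuilder query = QueryBuilders.geoShapeQuery("shape", "0", "doc")
        .indexedShapeIndex("test")
        .indexedShapeRouting("ABC");  // forwarded to the GetRequest that fetches the indexed shape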
@ -25,7 +25,6 @@ import org.elasticsearch.cluster.AbstractNamedDiffable;
|
|||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.NamedDiff;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
|
|
@ -1438,6 +1438,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry);
|
||||
streamIn.setVersion(version);
|
||||
threadPool.getThreadContext().readHeaders(streamIn);
|
||||
threadPool.getThreadContext().putTransient("_remote_address", remoteAddress);
|
||||
if (TransportStatus.isRequest(status)) {
|
||||
handleRequest(channel, profileName, streamIn, requestId, messageLengthBytes, version, remoteAddress, status);
|
||||
} else {
|
||||
|
|
|
@ -139,6 +139,8 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
|
|||
|
||||
protected static void assertHeaders(Map<String, String> headers, Map<String, String> expected) {
|
||||
assertNotNull(headers);
|
||||
headers = new HashMap<>(headers);
|
||||
headers.remove("transport_client"); // default header on TPC
|
||||
assertEquals(expected.size(), headers.size());
|
||||
for (Map.Entry<String, String> expectedEntry : expected.entrySet()) {
|
||||
assertEquals(headers.get(expectedEntry.getKey()), expectedEntry.getValue());
|
||||
|
@ -146,7 +148,6 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
|
|||
}
|
||||
|
||||
protected static void assertHeaders(ThreadPool pool) {
|
||||
Map<String, String> headers = new HashMap<>();
|
||||
Settings asSettings = HEADER_SETTINGS.getAsSettings(ThreadContext.PREFIX);
|
||||
assertHeaders(pool.getThreadContext().getHeaders(),
|
||||
asSettings.keySet().stream().collect(Collectors.toMap(Function.identity(), k -> asSettings.get(k))));
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
@ -63,6 +64,17 @@ public class TransportClientTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testDefaultHeaderContainsPlugins() {
|
||||
Settings baseSettings = Settings.builder()
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
|
||||
.build();
|
||||
try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) {
|
||||
ThreadContext threadContext = client.threadPool().getThreadContext();
|
||||
assertEquals("true", threadContext.getHeader("transport_client"));
|
||||
assertEquals("true", threadContext.getHeader("test"));
|
||||
}
|
||||
}
|
||||
|
||||
public static class MockPlugin extends Plugin {
|
||||
|
||||
@Override
|
||||
|
@ -70,6 +82,11 @@ public class TransportClientTests extends ESTestCase {
|
|||
return Arrays.asList(new Entry[]{ new Entry(MockNamedWriteable.class, MockNamedWriteable.NAME, MockNamedWriteable::new)});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Settings additionalSettings() {
|
||||
return Settings.builder().put(ThreadContext.PREFIX + "." + "test", true).build();
|
||||
}
|
||||
|
||||
public class MockNamedWriteable implements NamedWriteable {
|
||||
|
||||
static final String NAME = "mockNamedWritable";
|
||||
|
|
|
@ -30,6 +30,7 @@ import org.elasticsearch.cluster.NodeConnectionsService;
|
|||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -135,9 +136,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
clusterApplierService.currentTimeOverride = System.nanoTime();
|
||||
clusterApplierService.runOnApplierThread("test1",
|
||||
currentState -> clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos(),
|
||||
new ClusterStateTaskListener() {
|
||||
new ClusterApplyListener() {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
public void onSuccess(String source) {
|
||||
latch.countDown();
|
||||
}
|
||||
|
||||
|
@ -151,9 +152,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(2).nanos();
|
||||
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
|
||||
},
|
||||
new ClusterStateTaskListener() {
|
||||
new ClusterApplyListener() {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
public void onSuccess(String source) {
|
||||
fail();
|
||||
}
|
||||
|
||||
|
@ -166,9 +167,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
// We don't check logging for this on since there is no guarantee that it will occur before our check
|
||||
clusterApplierService.runOnApplierThread("test3",
|
||||
currentState -> {},
|
||||
new ClusterStateTaskListener() {
|
||||
new ClusterApplyListener() {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
public void onSuccess(String source) {
|
||||
latch.countDown();
|
||||
}
|
||||
|
||||
|
@ -216,9 +217,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
clusterApplierService.currentTimeOverride = System.nanoTime();
|
||||
clusterApplierService.runOnApplierThread("test1",
|
||||
currentState -> clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos(),
|
||||
new ClusterStateTaskListener() {
|
||||
new ClusterApplyListener() {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
public void onSuccess(String source) {
|
||||
latch.countDown();
|
||||
processedFirstTask.countDown();
|
||||
}
|
||||
|
@ -234,9 +235,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(32).nanos();
|
||||
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
|
||||
},
|
||||
new ClusterStateTaskListener() {
|
||||
new ClusterApplyListener() {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
public void onSuccess(String source) {
|
||||
fail();
|
||||
}
|
||||
|
||||
|
@ -247,9 +248,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
});
|
||||
clusterApplierService.runOnApplierThread("test3",
|
||||
currentState -> clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(34).nanos(),
|
||||
new ClusterStateTaskListener() {
|
||||
new ClusterApplyListener() {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
public void onSuccess(String source) {
|
||||
latch.countDown();
|
||||
}
|
||||
|
||||
|
@ -262,9 +263,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
// We don't check logging for this on since there is no guarantee that it will occur before our check
|
||||
clusterApplierService.runOnApplierThread("test4",
|
||||
currentState -> {},
|
||||
new ClusterStateTaskListener() {
|
||||
new ClusterApplyListener() {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
public void onSuccess(String source) {
|
||||
latch.countDown();
|
||||
}
|
||||
|
||||
|
@ -340,10 +341,10 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
|
||||
CountDownLatch latch = new CountDownLatch(1);
|
||||
clusterApplierService.onNewClusterState("test", () -> ClusterState.builder(clusterApplierService.state()).build(),
|
||||
new ClusterStateTaskListener() {
|
||||
new ClusterApplyListener() {
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
public void onSuccess(String source) {
|
||||
latch.countDown();
|
||||
}
|
||||
|
||||
|
@ -390,9 +391,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
|
||||
CountDownLatch latch = new CountDownLatch(1);
|
||||
clusterApplierService.onNewClusterState("test", () -> ClusterState.builder(clusterApplierService.state()).build(),
|
||||
new ClusterStateTaskListener() {
|
||||
new ClusterApplyListener() {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
public void onSuccess(String source) {
|
||||
latch.countDown();
|
||||
}
|
||||
|
||||
|
|
|
@ -150,7 +150,6 @@ public class SingleNodeDiscoveryIT extends ESIntegTestCase {
|
|||
internalCluster().getClusterName(),
|
||||
configurationSource,
|
||||
0,
|
||||
false,
|
||||
"other",
|
||||
Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class),
|
||||
Function.identity())) {
|
||||
|
|
|
@ -23,7 +23,6 @@ import org.elasticsearch.core.internal.io.IOUtils;
|
|||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskListener;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.service.ClusterApplier;
|
||||
|
@ -72,9 +71,9 @@ public class SingleNodeDiscoveryTests extends ESTestCase {
|
|||
|
||||
@Override
|
||||
public void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier,
|
||||
ClusterStateTaskListener listener) {
|
||||
ClusterApplyListener listener) {
|
||||
clusterState.set(clusterStateSupplier.get());
|
||||
listener.clusterStateProcessed(source, clusterState.get(), clusterState.get());
|
||||
listener.onSuccess(source);
|
||||
}
|
||||
});
|
||||
discovery.start();
|
||||
|
|
|
@ -26,7 +26,6 @@ import org.elasticsearch.cluster.ClusterChangedEvent;
|
|||
import org.elasticsearch.cluster.ClusterModule;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskListener;
|
||||
import org.elasticsearch.cluster.ESAllocationTestCase;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
|
@ -314,8 +313,8 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier, ClusterStateTaskListener listener) {
|
||||
listener.clusterStateProcessed(source, clusterStateSupplier.get(), clusterStateSupplier.get());
|
||||
public void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier, ClusterApplyListener listener) {
|
||||
listener.onSuccess(source);
|
||||
}
|
||||
};
|
||||
ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()),
|
||||
|
|
|
@ -59,6 +59,7 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase<GeoShapeQue
|
|||
private static String indexedShapeType;
|
||||
private static String indexedShapePath;
|
||||
private static String indexedShapeIndex;
|
||||
private static String indexedShapeRouting;
|
||||
private static ShapeBuilder indexedShapeToReturn;
|
||||
|
||||
@Override
|
||||
|
@ -85,6 +86,10 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase<GeoShapeQue
|
|||
indexedShapePath = randomAlphaOfLengthBetween(3, 20);
|
||||
builder.indexedShapePath(indexedShapePath);
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
indexedShapeRouting = randomAlphaOfLengthBetween(3, 20);
|
||||
builder.indexedShapeRouting(indexedShapeRouting);
|
||||
}
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
SpatialStrategy strategy = randomFrom(SpatialStrategy.values());
|
||||
|
@ -112,6 +117,7 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase<GeoShapeQue
|
|||
assertThat(indexedShapeType, notNullValue());
|
||||
assertThat(getRequest.id(), equalTo(indexedShapeId));
|
||||
assertThat(getRequest.type(), equalTo(indexedShapeType));
|
||||
assertThat(getRequest.routing(), equalTo(indexedShapeRouting));
|
||||
String expectedShapeIndex = indexedShapeIndex == null ? GeoShapeQueryBuilder.DEFAULT_SHAPE_INDEX_NAME : indexedShapeIndex;
|
||||
assertThat(getRequest.index(), equalTo(expectedShapeIndex));
|
||||
String expectedShapePath = indexedShapePath == null ? GeoShapeQueryBuilder.DEFAULT_SHAPE_FIELD_NAME : indexedShapePath;
|
||||
|
@ -136,6 +142,7 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase<GeoShapeQue
|
|||
indexedShapeType = null;
|
||||
indexedShapePath = null;
|
||||
indexedShapeIndex = null;
|
||||
indexedShapeRouting = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
|
|||
import org.elasticsearch.cluster.routing.TestShardRouting;
|
||||
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
|
||||
import org.elasticsearch.cluster.service.ClusterApplierService;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -446,9 +447,9 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
|
|||
.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder).build())
|
||||
.build();
|
||||
CountDownLatch latch = new CountDownLatch(1);
|
||||
clusterApplierService.onNewClusterState("test", () -> newState, new ClusterStateTaskListener() {
|
||||
clusterApplierService.onNewClusterState("test", () -> newState, new ClusterApplyListener() {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
public void onSuccess(String source) {
|
||||
latch.countDown();
|
||||
}
|
||||
|
||||
|
|
|
@@ -33,9 +33,7 @@ import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;

@@ -100,12 +98,6 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
return Arrays.asList(
new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new),
-new NamedWriteableRegistry.Entry(Task.Status.class,
-PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new),
-new NamedWriteableRegistry.Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE,
-PersistentTasksCustomMetaData::new),
-new NamedWriteableRegistry.Entry(NamedDiff.class, PersistentTasksCustomMetaData.TYPE,
-PersistentTasksCustomMetaData::readDiffFrom),
new NamedWriteableRegistry.Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new)
);
}

@@ -113,8 +105,6 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P
@Override
public List<NamedXContentRegistry.Entry> getNamedXContent() {
return Arrays.asList(
-new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(PersistentTasksCustomMetaData.TYPE),
-PersistentTasksCustomMetaData::fromXContent),
new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(TestPersistentTasksExecutor.NAME),
TestParams::fromXContent),
new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(TestPersistentTasksExecutor.NAME), Status::fromXContent)
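The persistent-task registrations removed above are no longer the test plugin's responsibility after this merge. For reference, a hypothetical minimal example of the registration pattern being trimmed here (`MyParams` is an illustrative `PersistentTaskParams` implementation, not a class from this diff):

@Override
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
    // each Entry binds a category class and a transport name to a concrete reader
    return Arrays.asList(
        new NamedWriteableRegistry.Entry(PersistentTaskParams.class, "my_task", MyParams::new));
}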
@@ -31,6 +31,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.test.ESIntegTestCase;

+import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;

@@ -121,6 +122,43 @@ public class GeoShapeIntegrationIT extends ESIntegTestCase {
assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
}

+/**
+ * Test that the indexed shape routing can be provided if it is required
+ */
+public void testIndexShapeRouting() throws Exception {
+    String mapping = "{\n" +
+        "    \"_routing\": {\n" +
+        "        \"required\": true\n" +
+        "    },\n" +
+        "    \"properties\": {\n" +
+        "        \"shape\": {\n" +
+        "            \"type\": \"geo_shape\"\n" +
+        "        }\n" +
+        "    }\n" +
+        "}";
+
+    // create index
+    assertAcked(client().admin().indices().prepareCreate("test").addMapping("doc", mapping, XContentType.JSON).get());
+    ensureGreen();
+
+    String source = "{\n" +
+        "    \"shape\" : {\n" +
+        "        \"type\" : \"circle\",\n" +
+        "        \"coordinates\" : [-45.0, 45.0],\n" +
+        "        \"radius\" : \"100m\"\n" +
+        "    }\n" +
+        "}";
+
+    indexRandom(true, client().prepareIndex("test", "doc", "0").setSource(source, XContentType.JSON).setRouting("ABC"));
+
+    SearchResponse searchResponse = client().prepareSearch("test").setQuery(
+        geoShapeQuery("shape", "0", "doc").indexedShapeIndex("test").indexedShapeRouting("ABC")
+    ).get();
+
+    assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+}

private String findNodeName(String index) {
ClusterState state = client().admin().cluster().prepareState().get().getState();
IndexShardRoutingTable shard = state.getRoutingTable().index(index).shard(0);
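The `indexedShapeRouting("ABC")` builder call above corresponds to a `routing` field on the query's `indexed_shape` object. A rough REST-level sketch of the same query (field names inferred from the builder methods, not taken from this diff):

GET /test/_search
{
  "query": {
    "geo_shape": {
      "shape": {
        "indexed_shape": {
          "index": "test",
          "type": "doc",
          "id": "0",
          "routing": "ABC"
        }
      }
    }
  }
}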
@@ -24,13 +24,13 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterApplier;
+import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
import org.elasticsearch.cluster.service.ClusterApplierService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.MasterService;

@@ -72,9 +72,9 @@ public class ClusterServiceUtils {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<Exception> exception = new AtomicReference<>();
executor.onNewClusterState("test setting state",
-() -> ClusterState.builder(clusterState).version(clusterState.version() + 1).build(), new ClusterStateTaskListener() {
+() -> ClusterState.builder(clusterState).version(clusterState.version() + 1).build(), new ClusterApplyListener() {
@Override
-public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+public void onSuccess(String source) {
latch.countDown();
}

@@ -163,9 +163,9 @@ public class ClusterServiceUtils {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<Exception> ex = new AtomicReference<>();
clusterApplier.onNewClusterState("mock_publish_to_self[" + event.source() + "]", () -> event.state(),
-new ClusterStateTaskListener() {
+new ClusterApplyListener() {
@Override
-public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+public void onSuccess(String source) {
latch.countDown();
}
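All the call sites touched here follow the same shape. A minimal sketch of the updated callback, assuming `ClusterApplyListener` pairs `onSuccess(String)` with an `onFailure(String, Exception)` counterpart (the failure method is not shown in this diff):

clusterApplier.onNewClusterState("test", () -> newState, new ClusterApplyListener() {
    @Override
    public void onSuccess(String source) {
        latch.countDown(); // the state was applied
    }

    @Override
    public void onFailure(String source, Exception e) {
        ex.set(e);         // surface the failure to the waiting test thread
        latch.countDown();
    }
});
latch.await();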
@@ -1829,7 +1829,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
return new InternalTestCluster(seed, createTempDir(), supportsDedicatedMasters, getAutoMinMasterNodes(),
minNumDataNodes, maxNumDataNodes,
InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(),
-InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins, getClientWrapper());
+nodePrefix, mockPlugins, getClientWrapper());
}

protected NodeConfigurationSource getNodeConfigSource() {
@@ -171,8 +171,6 @@ public final class InternalTestCluster extends TestCluster {
static final int DEFAULT_MIN_NUM_CLIENT_NODES = 0;
static final int DEFAULT_MAX_NUM_CLIENT_NODES = 1;

-static final boolean DEFAULT_ENABLE_HTTP_PIPELINING = true;
-
/* sorted map to make traverse order reproducible, concurrent since we do checks on it not within a sync block */
private final NavigableMap<String, NodeAndClient> nodes = new TreeMap<>();

@@ -219,7 +217,7 @@ public final class InternalTestCluster extends TestCluster {
public InternalTestCluster(long clusterSeed, Path baseDir,
boolean randomlyAddDedicatedMasters,
boolean autoManageMinMasterNodes, int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes,
-boolean enableHttpPipelining, String nodePrefix, Collection<Class<? extends Plugin>> mockPlugins, Function<Client, Client> clientWrapper) {
+String nodePrefix, Collection<Class<? extends Plugin>> mockPlugins, Function<Client, Client> clientWrapper) {
super(clusterSeed);
this.autoManageMinMasterNodes = autoManageMinMasterNodes;
this.clientWrapper = clientWrapper;

@@ -300,7 +298,6 @@ public final class InternalTestCluster extends TestCluster {
builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos"));
builder.put(TcpTransport.PORT.getKey(), 0);
builder.put("http.port", 0);
-builder.put("http.pipelining", enableHttpPipelining);
if (Strings.hasLength(System.getProperty("tests.es.logger.level"))) {
builder.put("logger.level", System.getProperty("tests.es.logger.level"));
}
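With HTTP pipelining now always enabled, the `enableHttpPipelining` flag disappears from the constructor, so call sites collapse to the following (a sketch mirroring the updated signature above):

InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir,
    randomlyAddDedicatedMasters, autoManageMinMasterNodes,
    minNumDataNodes, maxNumDataNodes, clusterName,
    nodeConfigurationSource, numClientNodes,
    nodePrefix, mockPlugins, clientWrapper);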
@@ -19,8 +19,6 @@
*/
package org.elasticsearch.test.test;

-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.core.internal.io.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;

@@ -28,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.env.NodeEnvironment;

@@ -63,8 +62,6 @@ import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_M
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.not;

/**

@@ -86,16 +83,15 @@ public class InternalTestClusterTests extends ESTestCase {
String clusterName = randomRealisticUnicodeOfCodepointLengthBetween(1, 10);
NodeConfigurationSource nodeConfigurationSource = NodeConfigurationSource.EMPTY;
int numClientNodes = randomIntBetween(0, 10);
-boolean enableHttpPipelining = randomBoolean();
String nodePrefix = randomRealisticUnicodeOfCodepointLengthBetween(1, 10);

Path baseDir = createTempDir();
InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes,
-enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity());
+nodePrefix, Collections.emptyList(), Function.identity());
InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes,
-enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity());
+nodePrefix, Collections.emptyList(), Function.identity());
// TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way
assertClusters(cluster0, cluster1, false);

@@ -211,16 +207,15 @@ public class InternalTestClusterTests extends ESTestCase {
}
};

-boolean enableHttpPipelining = randomBoolean();
String nodePrefix = "foobar";

Path baseDir = createTempDir();
InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
-enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity());
+nodePrefix, mockPlugins(), Function.identity());
InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes,
-enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity());
+nodePrefix, mockPlugins(), Function.identity());

assertClusters(cluster0, cluster1, false);
long seed = randomLong();

@@ -280,12 +275,11 @@ public class InternalTestClusterTests extends ESTestCase {
.put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build();
}
};
-boolean enableHttpPipelining = randomBoolean();
String nodePrefix = "test";
Path baseDir = createTempDir();
InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
true, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
-enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity());
+nodePrefix, mockPlugins(), Function.identity());
try {
cluster.beforeTest(random(), 0.0);
final int originalMasterCount = cluster.numMasterNodes();

@@ -390,7 +384,7 @@ public class InternalTestClusterTests extends ESTestCase {
return Settings.builder()
.put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build();
}
-}, 0, randomBoolean(), "", mockPlugins(), Function.identity());
+}, 0, "", mockPlugins(), Function.identity());
cluster.beforeTest(random(), 0.0);
List<DiscoveryNode.Role> roles = new ArrayList<>();
for (int i = 0; i < numNodes; i++) {

@@ -473,13 +467,12 @@ public class InternalTestClusterTests extends ESTestCase {
.put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build();
}
};
-boolean enableHttpPipelining = randomBoolean();
String nodePrefix = "test";
Path baseDir = createTempDir();
List<Class<? extends Plugin>> plugins = new ArrayList<>(mockPlugins());
plugins.add(NodeAttrCheckPlugin.class);
InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, false, true, 2, 2,
-"test", nodeConfigurationSource, 0, enableHttpPipelining, nodePrefix,
+"test", nodeConfigurationSource, 0, nodePrefix,
plugins, Function.identity());
try {
cluster.beforeTest(random(), 0.0);
@@ -1,50 +1,10 @@
[role="xpack"]
[[auditing]]
== Auditing security events

You can enable auditing to keep track of security-related events such as
authentication failures and refused connections. Logging these events enables you
to monitor your cluster for suspicious activity and provides evidence in the
event of an attack.

[IMPORTANT]
============================================================================
Audit logs are **disabled** by default. To enable this functionality, you
must set `xpack.security.audit.enabled` to `true` in `elasticsearch.yml`.
============================================================================

{Security} provides two ways to persist audit logs:

* The <<audit-log-output, `logfile`>> output, which persists events to
a dedicated `<clustername>_access.log` file on the host's file system.
* The <<audit-index, `index`>> output, which persists events to an Elasticsearch index.
The audit index can reside on the same cluster, or a separate cluster.

By default, only the `logfile` output is used when enabling auditing.
To facilitate browsing and analyzing the events, you can also enable
indexing by setting `xpack.security.audit.outputs` in `elasticsearch.yml`:

[source,yaml]
----------------------------
xpack.security.audit.outputs: [ index, logfile ]
----------------------------

The `index` output type should be used in conjunction with the `logfile`
output type. Because it is possible for the `index` output type to lose
messages if the target index is unavailable, the `access.log` should be
used as the official record of events.

NOTE: Audit events are batched for indexing so there is a lag before
events appear in the index. You can control how frequently batches of
events are pushed to the index by setting
`xpack.security.audit.index.flush_interval` in `elasticsearch.yml`.

[float]
[[audit-event-types]]
=== Audit event types

-Each request may generate multiple audit events.
-The following is a list of the events that can be generated:
+When you are <<auditing,auditing security events>>, each request can generate
+multiple audit events. The following is a list of the events that can be generated:

|======
| `anonymous_access_denied` | | | Logged when a request is denied due to a missing

@@ -281,195 +241,3 @@ The log level determines which attributes are included in a log entry.
| `rule` | The <<ip-filtering, IP filtering>> rule that denied
the request.
|======

[float]
[[audit-log-output]]
=== Logfile audit output

The `logfile` audit output is the default output for auditing. It writes data to
the `<clustername>_access.log` file in the logs directory.

[float]
[[audit-log-entry-format]]
=== Log entry format

The format of a log entry is:

[source,txt]
----------------------------------------------------------------------------
[<timestamp>] [<local_node_info>] [<layer>] [<entry_type>] <attribute_list>
----------------------------------------------------------------------------

`<timestamp>` :: When the event occurred. You can configure the
timestamp format in `log4j2.properties`.
`<local_node_info>` :: Information about the local node that generated
the log entry. You can control what node information
is included by configuring the
{ref}/auditing-settings.html#node-audit-settings[local node info settings].
`<layer>` :: The layer from which this event originated:
`rest`, `transport` or `ip_filter`.
`<entry_type>` :: The type of event that occurred: `anonymous_access_denied`,
`authentication_failed`, `access_denied`, `access_granted`,
`connection_granted`, `connection_denied`.
`<attribute_list>` :: A comma-separated list of key-value pairs that contain
data pertaining to the event. Formatted as
`attr1=[val1], attr2=[val2]`. See <<audit-event-attributes,
Audit Entry Attributes>> for the attributes that can be included
for each type of event.

[float]
[[audit-log-settings]]
=== Logfile output settings

The events and some other information about what gets logged can be
controlled using settings in the `elasticsearch.yml` file. See
{ref}/auditing-settings.html#event-audit-settings[Audited Event Settings] and
{ref}/auditing-settings.html#node-audit-settings[Local Node Info Settings].

IMPORTANT: No filtering is performed when auditing, so sensitive data may be
audited in plain text when including the request body in audit events.

[[logging-file]]
You can also configure how the logfile is written in the `log4j2.properties`
file located in `CONFIG_DIR`. By default, audit information is appended to the
`<clustername>_access.log` file located in the standard Elasticsearch `logs` directory
(typically located at `$ES_HOME/logs`). The file rolls over on a daily basis.

[float]
[[audit-log-ignore-policy]]
=== Logfile audit events ignore policies

The comprehensive audit trail is necessary to ensure accountability. It offers tremendous
value during incident response and can even be required for demonstrating compliance.

The drawback of an audited system is the inevitable performance penalty it incurs:
the audit trail consumes _I/O ops_ that are no longer available for user queries.
Sometimes the verbosity of the audit trail may become a problem that the event type restrictions,
<<audit-log-settings, defined by `include` and `exclude`>>, will not alleviate.

*Audit events ignore policies* are a finer way to tune the verbosity of the audit trail.
These policies define rules that match audit events which will be _ignored_ (read as: not printed).
Rules match on the values of attributes of audit events and complement the <<audit-log-settings, include/exclude>> method.
Imagine the corpus of audit events and the policies chopping off unwanted events.

IMPORTANT: When utilizing audit events ignore policies, you are acknowledging potential
accountability gaps that could render illegitimate actions undetectable.
Please take time to review these policies whenever your system architecture changes.

A policy is a named set of filter rules. Each filter rule applies to a single event attribute,
one of the `users`, `realms`, `roles` or `indices` attributes. The filter rule defines
a list of {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexps], *any* of which has to match the value of the audit
event attribute for the rule to match.
A policy matches an event if *all* the rules comprising it match the event.
An audit event is ignored, therefore not printed, if it matches *any* policy. All other
non-matching events are printed as usual.

All policies are defined under the `xpack.security.audit.logfile.events.ignore_filters`
settings namespace. For example, the following policy named _example1_ matches
events from the _kibana_ or _admin_user_ principals **and** operating over indices of the
wildcard form _app-logs*_:

[source,yaml]
----------------------------
xpack.security.audit.logfile.events.ignore_filters:
  example1:
    users: ["kibana", "admin_user"]
    indices: ["app-logs*"]
----------------------------

An audit event generated by the _kibana_ user and operating over multiple indices,
some of which do not match the indices wildcard, will not match.
As expected, operations generated by all other users (even operating only on indices that
match the _indices_ filter) will not match this policy either.

Audit events of different types may have <<audit-event-attributes, different attributes>>.
If an event does not contain an attribute for which some policy defines filters, the
event will not match the policy.
For example, the following policy named _example2_ will never match `authentication_success` or
`authentication_failed` events, irrespective of the user's roles, because these
event schemas do not contain the `role` attribute:

[source,yaml]
----------------------------
xpack.security.audit.logfile.events.ignore_filters:
  example2:
    roles: ["admin", "ops_admin_*"]
----------------------------

Likewise, any events of users with multiple roles, some of which do not match the
regexps, will not match this policy.

For completeness, although practical use cases should be rare, a filter can match
a missing attribute of an event, using the empty string ("") or the empty list ([]).
For example, the following policy will match events that do not have the `indices`
attribute (`anonymous_access_denied`, `authentication_success` and other types) as well
as events over the _next_ index.

[source,yaml]
----------------------------
xpack.security.audit.logfile.events.ignore_filters:
  example3:
    indices: ["next", ""]
----------------------------

[float]
[[audit-index]]
=== Index audit output

In addition to logging to a file, you can store audit logs in Elasticsearch
rolling indices. These indices can be either on the same cluster, or on a
remote cluster. You configure the following settings in
`elasticsearch.yml` to control how audit entries are indexed. To enable
this output, you need to configure the setting `xpack.security.audit.outputs`
in the `elasticsearch.yml` file:

[source,yaml]
----------------------------
xpack.security.audit.outputs: [ index, logfile ]
----------------------------

For more configuration options, see
{ref}/auditing-settings.html#index-audit-settings[Audit log indexing configuration settings].

IMPORTANT: No filtering is performed when auditing, so sensitive data may be
audited in plain text when including the request body in audit events.

[float]
==== Audit index settings

You can also configure settings for the indices that the events are stored in.
These settings are configured in the `xpack.security.audit.index.settings` namespace
in `elasticsearch.yml`. For example, the following configuration sets the
number of shards and replicas to 1 for the audit indices:

[source,yaml]
----------------------------
xpack.security.audit.index.settings:
  index:
    number_of_shards: 1
    number_of_replicas: 1
----------------------------

[float]
==== Forwarding audit logs to a remote cluster

To index audit events to a remote Elasticsearch cluster, you configure
the following `xpack.security.audit.index.client` settings:

* `xpack.security.audit.index.client.hosts`
* `xpack.security.audit.index.client.cluster.name`
* `xpack.security.audit.index.client.xpack.security.user`

For more information about these settings, see
{ref}/auditing-settings.html#remote-audit-settings[Remote Audit Log Indexing Configuration Settings].

You can pass additional settings to the remote client by specifying them in the
`xpack.security.audit.index.client` namespace. For example, to allow the remote
client to discover all of the nodes in the remote cluster you can specify the
`client.transport.sniff` setting:

[source,yaml]
----------------------------
xpack.security.audit.index.client.transport.sniff: true
----------------------------
@@ -0,0 +1,24 @@
[role="xpack"]
[float]
[[forwarding-audit-logfiles]]
==== Forwarding audit logs to a remote cluster

To index audit events to a remote Elasticsearch cluster, you configure
the following `xpack.security.audit.index.client` settings:

* `xpack.security.audit.index.client.hosts`
* `xpack.security.audit.index.client.cluster.name`
* `xpack.security.audit.index.client.xpack.security.user`

For more information about these settings, see
{ref}/auditing-settings.html#remote-audit-settings[Remote Audit Log Indexing Configuration Settings].

You can pass additional settings to the remote client by specifying them in the
`xpack.security.audit.index.client` namespace. For example, to allow the remote
client to discover all of the nodes in the remote cluster you can specify the
`client.transport.sniff` setting:

[source,yaml]
----------------------------
xpack.security.audit.index.client.transport.sniff: true
----------------------------
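Putting the three settings together, a minimal forwarding configuration might look like the following (the host, cluster name, and credentials are placeholders, not values from this change):

[source,yaml]
----------------------------
xpack.security.audit.index.client.hosts: remote-host:9300
xpack.security.audit.index.client.cluster.name: remote-cluster
xpack.security.audit.index.client.xpack.security.user: audit_user:audit_password
----------------------------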
@@ -0,0 +1,15 @@

:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/overview.asciidoc
include::overview.asciidoc[]

:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/event-types.asciidoc
include::event-types.asciidoc[]

:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/output-logfile.asciidoc
include::output-logfile.asciidoc[]

:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/output-index.asciidoc
include::output-index.asciidoc[]

:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/forwarding-logs.asciidoc
include::forwarding-logs.asciidoc[]
@@ -0,0 +1,38 @@
[role="xpack"]
[float]
[[audit-index]]
=== Index audit output

In addition to logging to a file, you can store audit logs in Elasticsearch
rolling indices. These indices can be either on the same cluster, or on a
remote cluster. You configure the following settings in
`elasticsearch.yml` to control how audit entries are indexed. To enable
this output, you need to configure the setting `xpack.security.audit.outputs`
in the `elasticsearch.yml` file:

[source,yaml]
----------------------------
xpack.security.audit.outputs: [ index, logfile ]
----------------------------

For more configuration options, see
{ref}/auditing-settings.html#index-audit-settings[Audit log indexing configuration settings].

IMPORTANT: No filtering is performed when auditing, so sensitive data may be
audited in plain text when including the request body in audit events.

[float]
==== Audit index settings

You can also configure settings for the indices that the events are stored in.
These settings are configured in the `xpack.security.audit.index.settings` namespace
in `elasticsearch.yml`. For example, the following configuration sets the
number of shards and replicas to 1 for the audit indices:

[source,yaml]
----------------------------
xpack.security.audit.index.settings:
  index:
    number_of_shards: 1
    number_of_replicas: 1
----------------------------
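The rolling indices this output writes to are time-based; with default settings they are named along the lines of the following (illustrative only; the exact pattern depends on the configured rotation interval):

[source,txt]
----------------------------
.security_audit_log-2018.05.15
.security_audit_log-2018.05.16
----------------------------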
@@ -0,0 +1,130 @@
[role="xpack"]
[float]
[[audit-log-output]]
=== Logfile audit output

The `logfile` audit output is the default output for auditing. It writes data to
the `<clustername>_access.log` file in the logs directory.

[float]
[[audit-log-entry-format]]
=== Log entry format

The format of a log entry is:

[source,txt]
----------------------------------------------------------------------------
[<timestamp>] [<local_node_info>] [<layer>] [<entry_type>] <attribute_list>
----------------------------------------------------------------------------

`<timestamp>` :: When the event occurred. You can configure the
timestamp format in `log4j2.properties`.
`<local_node_info>` :: Information about the local node that generated
the log entry. You can control what node information
is included by configuring the
{ref}/auditing-settings.html#node-audit-settings[local node info settings].
`<layer>` :: The layer from which this event originated:
`rest`, `transport` or `ip_filter`.
`<entry_type>` :: The type of event that occurred: `anonymous_access_denied`,
`authentication_failed`, `access_denied`, `access_granted`,
`connection_granted`, `connection_denied`.
`<attribute_list>` :: A comma-separated list of key-value pairs that contain
data pertaining to the event. Formatted as
`attr1=[val1], attr2=[val2]`. See <<audit-event-attributes,
Audit Entry Attributes>> for the attributes that can be included
for each type of event.
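For illustration, an entry in this format might look like the following (a fabricated example; the attribute names shown are typical but vary by event type):

[source,txt]
----------------------------------------------------------------------------
[2018-05-15T17:31:02,123] [node-0] [transport] [access_granted] origin_type=[rest], principal=[kibana], action=[indices:data/read/search]
----------------------------------------------------------------------------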
[float]
[[audit-log-settings]]
=== Logfile output settings

The events and some other information about what gets logged can be
controlled using settings in the `elasticsearch.yml` file. See
{ref}/auditing-settings.html#event-audit-settings[Audited Event Settings] and
{ref}/auditing-settings.html#node-audit-settings[Local Node Info Settings].

IMPORTANT: No filtering is performed when auditing, so sensitive data may be
audited in plain text when including the request body in audit events.

[[logging-file]]
You can also configure how the logfile is written in the `log4j2.properties`
file located in `CONFIG_DIR`. By default, audit information is appended to the
`<clustername>_access.log` file located in the standard Elasticsearch `logs` directory
(typically located at `$ES_HOME/logs`). The file rolls over on a daily basis.

[float]
[[audit-log-ignore-policy]]
=== Logfile audit events ignore policies

The comprehensive audit trail is necessary to ensure accountability. It offers tremendous
value during incident response and can even be required for demonstrating compliance.

The drawback of an audited system is the inevitable performance penalty it incurs:
the audit trail consumes _I/O ops_ that are no longer available for user queries.
Sometimes the verbosity of the audit trail may become a problem that the event type restrictions,
<<audit-log-settings, defined by `include` and `exclude`>>, will not alleviate.

*Audit events ignore policies* are a finer way to tune the verbosity of the audit trail.
These policies define rules that match audit events which will be _ignored_ (read as: not printed).
Rules match on the values of attributes of audit events and complement the <<audit-log-settings, include/exclude>> method.
Imagine the corpus of audit events and the policies chopping off unwanted events.

IMPORTANT: When utilizing audit events ignore policies, you are acknowledging potential
accountability gaps that could render illegitimate actions undetectable.
Please take time to review these policies whenever your system architecture changes.

A policy is a named set of filter rules. Each filter rule applies to a single event attribute,
one of the `users`, `realms`, `roles` or `indices` attributes. The filter rule defines
a list of {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexps], *any* of which has to match the value of the audit
event attribute for the rule to match.
A policy matches an event if *all* the rules comprising it match the event.
An audit event is ignored, therefore not printed, if it matches *any* policy. All other
non-matching events are printed as usual.
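In other words, policies are OR-ed together while the rules inside a policy are AND-ed. A compact sketch of these semantics (a hypothetical helper, not the actual Elasticsearch implementation; the empty-string match for missing attributes described below is omitted):

[source,java]
----------------------------
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

class IgnorePolicySketch {
    /** An event is ignored if ANY policy matches; a policy matches if ALL of its rules match. */
    static boolean isIgnored(Map<String, List<String>> event, List<Map<String, List<Pattern>>> policies) {
        return policies.stream().anyMatch(policy ->
            policy.entrySet().stream().allMatch(rule -> {
                List<String> values = event.get(rule.getKey());
                if (values == null || values.isEmpty()) {
                    return false; // attribute missing from this event type: the rule fails
                }
                // every value of the attribute must be matched by at least one regexp of the rule
                return values.stream().allMatch(value ->
                    rule.getValue().stream().anyMatch(regexp -> regexp.matcher(value).matches()));
            }));
    }
}
----------------------------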
All policies are defined under the `xpack.security.audit.logfile.events.ignore_filters`
settings namespace. For example, the following policy named _example1_ matches
events from the _kibana_ or _admin_user_ principals **and** operating over indices of the
wildcard form _app-logs*_:

[source,yaml]
----------------------------
xpack.security.audit.logfile.events.ignore_filters:
  example1:
    users: ["kibana", "admin_user"]
    indices: ["app-logs*"]
----------------------------

An audit event generated by the _kibana_ user and operating over multiple indices,
some of which do not match the indices wildcard, will not match.
As expected, operations generated by all other users (even operating only on indices that
match the _indices_ filter) will not match this policy either.

Audit events of different types may have <<audit-event-attributes, different attributes>>.
If an event does not contain an attribute for which some policy defines filters, the
event will not match the policy.
For example, the following policy named _example2_ will never match `authentication_success` or
`authentication_failed` events, irrespective of the user's roles, because these
event schemas do not contain the `role` attribute:

[source,yaml]
----------------------------
xpack.security.audit.logfile.events.ignore_filters:
  example2:
    roles: ["admin", "ops_admin_*"]
----------------------------

Likewise, any events of users with multiple roles, some of which do not match the
regexps, will not match this policy.

For completeness, although practical use cases should be rare, a filter can match
a missing attribute of an event, using the empty string ("") or the empty list ([]).
For example, the following policy will match events that do not have the `indices`
attribute (`anonymous_access_denied`, `authentication_success` and other types) as well
as events over the _next_ index.

[source,yaml]
----------------------------
xpack.security.audit.logfile.events.ignore_filters:
  example3:
    indices: ["next", ""]
----------------------------
@@ -0,0 +1,40 @@
[role="xpack"]
[[auditing]]
== Auditing security events

You can enable auditing to keep track of security-related events such as
authentication failures and refused connections. Logging these events enables you
to monitor your cluster for suspicious activity and provides evidence in the
event of an attack.

[IMPORTANT]
============================================================================
Audit logs are **disabled** by default. To enable this functionality, you
must set `xpack.security.audit.enabled` to `true` in `elasticsearch.yml`.
============================================================================

{Security} provides two ways to persist audit logs:

* The <<audit-log-output, `logfile`>> output, which persists events to
a dedicated `<clustername>_access.log` file on the host's file system.
* The <<audit-index, `index`>> output, which persists events to an Elasticsearch index.
The audit index can reside on the same cluster, or a separate cluster.

By default, only the `logfile` output is used when enabling auditing.
To facilitate browsing and analyzing the events, you can also enable
indexing by setting `xpack.security.audit.outputs` in `elasticsearch.yml`:

[source,yaml]
----------------------------
xpack.security.audit.outputs: [ index, logfile ]
----------------------------

The `index` output type should be used in conjunction with the `logfile`
output type. Because it is possible for the `index` output type to lose
messages if the target index is unavailable, the `access.log` should be
used as the official record of events.

NOTE: Audit events are batched for indexing so there is a lag before
events appear in the index. You can control how frequently batches of
events are pushed to the index by setting
`xpack.security.audit.index.flush_interval` in `elasticsearch.yml`.
@@ -108,7 +108,7 @@ include::authentication/overview.asciidoc[]
include::authorization/overview.asciidoc[]

:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing.asciidoc
-include::auditing.asciidoc[]
+include::auditing/index.asciidoc[]

:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications.asciidoc
include::securing-communications.asciidoc[]
@@ -1145,7 +1145,7 @@ Path to the PEM encoded file containing the private key.
The passphrase that is used to decrypt the private key. This value is
optional as the key might not be encrypted.

-`xpack.ssl.secure_key_passphrase` ({<<secure-settings,Secure>>)::
+`xpack.ssl.secure_key_passphrase` (<<secure-settings,Secure>>)::
The passphrase that is used to decrypt the private key. This value is
optional as the key might not be encrypted.
@@ -90,7 +90,7 @@ Path to the keystore that holds the private key and certificate.
+{ssl-prefix}.ssl.keystore.password+::
Password to the keystore.

-+{ssl-prefix}.ssl.keystore.secure_password` (<<secure-settings,Secure>>)::
++{ssl-prefix}.ssl.keystore.secure_password+ (<<secure-settings,Secure>>)::
Password to the keystore.

+{ssl-prefix}.ssl.keystore.key_password+::
@@ -16,6 +16,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.license.DeleteLicenseAction;

@@ -105,13 +106,7 @@ import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage;
-import org.elasticsearch.persistent.CompletionPersistentTaskAction;
import org.elasticsearch.persistent.PersistentTaskParams;
-import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
-import org.elasticsearch.persistent.PersistentTasksNodeService;
-import org.elasticsearch.persistent.RemovePersistentTaskAction;
-import org.elasticsearch.persistent.StartPersistentTaskAction;
-import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction;
import org.elasticsearch.xpack.core.rollup.RollupFeatureSetUsage;
import org.elasticsearch.xpack.core.rollup.RollupField;
import org.elasticsearch.xpack.core.rollup.action.DeleteRollupJobAction;

@@ -208,6 +203,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
final Settings.Builder builder = Settings.builder();
builder.put(SecuritySettings.addTransportSettings(settings));
builder.put(SecuritySettings.addUserSettings(settings));
+builder.put(ThreadContext.PREFIX + "." + "has_xpack", true);
return builder.build();
} else {
return Settings.EMPTY;

@@ -268,11 +264,6 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
GetCalendarEventsAction.INSTANCE,
PostCalendarEventsAction.INSTANCE,
PersistJobAction.INSTANCE,
-// licensing
-StartPersistentTaskAction.INSTANCE,
-UpdatePersistentTaskStatusAction.INSTANCE,
-RemovePersistentTaskAction.INSTANCE,
-CompletionPersistentTaskAction.INSTANCE,
// security
ClearRealmCacheAction.INSTANCE,
ClearRolesCacheAction.INSTANCE,

@@ -341,18 +332,12 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
// ML - Custom metadata
new NamedWriteableRegistry.Entry(MetaData.Custom.class, "ml", MlMetadata::new),
new NamedWriteableRegistry.Entry(NamedDiff.class, "ml", MlMetadata.MlMetadataDiff::new),
-new NamedWriteableRegistry.Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE,
-PersistentTasksCustomMetaData::new),
-new NamedWriteableRegistry.Entry(NamedDiff.class, PersistentTasksCustomMetaData.TYPE,
-PersistentTasksCustomMetaData::readDiffFrom),
// ML - Persistent action requests
new NamedWriteableRegistry.Entry(PersistentTaskParams.class, StartDatafeedAction.TASK_NAME,
StartDatafeedAction.DatafeedParams::new),
new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME,
OpenJobAction.JobParams::new),
// ML - Task statuses
-new NamedWriteableRegistry.Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME,
-PersistentTasksNodeService.Status::new),
new NamedWriteableRegistry.Entry(Task.Status.class, JobTaskStatus.NAME, JobTaskStatus::new),
new NamedWriteableRegistry.Entry(Task.Status.class, DatafeedState.NAME, DatafeedState::fromStream),
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MACHINE_LEARNING,

@@ -405,8 +390,6 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
// ML - Custom metadata
new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField("ml"),
parser -> MlMetadata.METADATA_PARSER.parse(parser, null).build()),
-new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(PersistentTasksCustomMetaData.TYPE),
-PersistentTasksCustomMetaData::fromXContent),
// ML - Persistent action requests
new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(StartDatafeedAction.TASK_NAME),
StartDatafeedAction.DatafeedParams::fromXContent),

@@ -422,8 +405,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(LicensesMetaData.TYPE),
LicensesMetaData::fromXContent),
//rollup
-new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(RollupField.TASK_NAME),
-parser -> RollupJob.fromXContent(parser)),
+new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(RollupField.TASK_NAME), RollupJob::fromXContent),
new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(RollupJobStatus.NAME), RollupJobStatus::fromXContent),
// ILM - Custom Metadata
new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IndexLifecycleMetadata.TYPE),
@@ -214,7 +214,7 @@ public class IndexAuditTrailTests extends SecurityIntegTestCase {
mockPlugins.add(getTestTransportPlugin());
}
remoteCluster = new InternalTestCluster(randomLong(), createTempDir(), false, true, numNodes, numNodes, cluster2Name,
-cluster2SettingsSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, mockPlugins,
+cluster2SettingsSource, 0, SECOND_CLUSTER_NODE_PREFIX, mockPlugins,
useSecurity ? getClientWrapper() : Function.identity());
remoteCluster.beforeTest(random(), 0.5);
@@ -117,7 +117,7 @@ public class RemoteIndexAuditTrailStartingTests extends SecurityIntegTestCase {
}
};
remoteCluster = new InternalTestCluster(randomLong(), createTempDir(), false, true, numNodes, numNodes,
-cluster2Name, cluster2SettingsSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, getMockPlugins(), getClientWrapper());
+cluster2Name, cluster2SettingsSource, 0, SECOND_CLUSTER_NODE_PREFIX, getMockPlugins(), getClientWrapper());
remoteCluster.beforeTest(random(), 0.0);
assertNoTimeout(remoteCluster.client().admin().cluster().prepareHealth().setWaitForGreenStatus().get());
}
@@ -26,8 +26,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.persistent.PersistentTaskParams;
-import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
-import org.elasticsearch.persistent.PersistentTasksNodeService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;

@@ -447,14 +445,10 @@ abstract class MlNativeAutodetectIntegTestCase extends ESIntegTestCase {
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedWriteables());
entries.addAll(new SearchModule(Settings.EMPTY, true, Collections.emptyList()).getNamedWriteables());
entries.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, "ml", MlMetadata::new));
-entries.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE,
-PersistentTasksCustomMetaData::new));
entries.add(new NamedWriteableRegistry.Entry(PersistentTaskParams.class, StartDatafeedAction.TASK_NAME,
StartDatafeedAction.DatafeedParams::new));
entries.add(new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME,
OpenJobAction.JobParams::new));
-entries.add(new NamedWriteableRegistry.Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME,
-PersistentTasksNodeService.Status::new));
entries.add(new NamedWriteableRegistry.Entry(Task.Status.class, JobTaskStatus.NAME, JobTaskStatus::new));
entries.add(new NamedWriteableRegistry.Entry(Task.Status.class, DatafeedState.NAME, DatafeedState::fromStream));
entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetaData.TYPE, TokenMetaData::new));