Merge remote-tracking branch 'upstream/master' into index-lifecycle
commit 810cd46a30
@@ -64,11 +64,10 @@ public final class PutRoleMappingResponse {
    private static final ConstructingObjectParser<PutRoleMappingResponse, Void> PARSER = new ConstructingObjectParser<>(
        "put_role_mapping_response", true, args -> new PutRoleMappingResponse((boolean) args[0]));
    static {
-        PARSER.declareBoolean(constructorArg(), new ParseField("created"));
-        // To parse the "created" field we declare "role_mapping" field object.
-        // Once the nested field "created" is found parser constructs the target object and
-        // ignores the role_mapping object.
-        PARSER.declareObject((a,b) -> {}, (parser, context) -> null, new ParseField("role_mapping"));
+        ConstructingObjectParser<Boolean, Void> roleMappingParser = new ConstructingObjectParser<>(
+            "put_role_mapping_response.role_mapping", true, args -> (Boolean) args[0]);
+        roleMappingParser.declareBoolean(constructorArg(), new ParseField("created"));
+        PARSER.declareObject(constructorArg(), roleMappingParser::parse, new ParseField("role_mapping"));
    }

    public static PutRoleMappingResponse fromXContent(XContentParser parser) throws IOException {
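For context (an illustrative note, not part of the diff): the put role mapping API nests its acknowledgement under a `role_mapping` object, which is why the new code delegates to a nested parser instead of relying on the old noop-object trick described in the removed comments. A representative response body:

[source,js]
----
{
  "role_mapping": {
    "created": true <1>
  }
}
----
<1> `false` when an existing mapping was updated rather than created.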
@@ -46,9 +46,9 @@ PUT _template/template_1

NOTE: Index templates provide C-style /* */ block comments. Comments are allowed
everywhere in the JSON document except before the initial opening curly bracket.

-Defines a template named `template_1`, with a template pattern of `te*`.
+Defines a template named `template_1`, with a template pattern of `te*` or `bar*`.
The settings and mappings will be applied to any index name that matches
-the `te*` pattern.
+the `te*` or `bar*` pattern.

It is also possible to include aliases in an index template as follows:
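For context (an illustrative sketch, not part of the diff): an alias block in a 6.x index template looks roughly like the following; the `alias1` name is a placeholder.

[source,js]
----
PUT _template/template_1
{
  "index_patterns": ["te*", "bar*"],
  "settings": {
    "number_of_shards": 1
  },
  "aliases": {
    "alias1": {}
  }
}
----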
@@ -0,0 +1,195 @@
[role="xpack"]
[testenv="gold"]
[[configuring-metricbeat]]
=== Monitoring {es} with {metricbeat}

beta[] In 6.5 and later, you can use {metricbeat} to collect data about {es}
and ship it to the monitoring cluster, rather than routing it through exporters
as described in <<configuring-monitoring>>.

image::monitoring/images/metricbeat.png[Example monitoring architecture]

To learn about monitoring in general, see
{stack-ov}/xpack-monitoring.html[Monitoring the {stack}].

. Enable the collection of monitoring data. Set
`xpack.monitoring.collection.enabled` to `true` on the production cluster. +
+
--
For example, you can use the following APIs to review and change this setting:

[source,js]
----------------------------------
GET _cluster/settings

PUT _cluster/settings
{
  "persistent": {
    "xpack.monitoring.collection.enabled": true
  }
}
----------------------------------
// CONSOLE

For more information, see <<monitoring-settings>> and <<cluster-update-settings>>.
--

. Disable the default collection of {es} monitoring metrics. Set
`xpack.monitoring.elasticsearch.collection.enabled` to `false` on the production
cluster. +
+
--
For example, you can use the following API to change this setting:

[source,js]
----------------------------------
PUT _cluster/settings
{
  "persistent": {
    "xpack.monitoring.elasticsearch.collection.enabled": false
  }
}
----------------------------------
// CONSOLE

Leave `xpack.monitoring.enabled` set to its default value (`true`).
--

. On each {es} node in the production cluster:

.. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}].

.. Enable the {es} module in {metricbeat}. +
+
--
For example, to enable the default configuration in the `modules.d` directory,
run the following command:

["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
metricbeat modules enable elasticsearch
----------------------------------------------------------------------

For more information, see
{metricbeat-ref}/configuration-metricbeat.html[Specify which modules to run] and
{metricbeat-ref}/metricbeat-module-elasticsearch.html[{es} module].
--

.. Configure the {es} module in {metricbeat}. +
+
--
You must specify the following settings in the `modules.d/elasticsearch.yml` file:

[source,yaml]
----------------------------------
- module: elasticsearch
  metricsets:
    - ccr
    - cluster_stats
    - index
    - index_recovery
    - index_summary
    - ml_job
    - node_stats
    - shard
  period: 10s
  hosts: ["http://localhost:9200"] <1>
  xpack.enabled: true
----------------------------------
<1> This setting identifies the host and port number that are used to access {es}.
--

.. If {security} is enabled, you must also provide a user ID and password so that
{metricbeat} can collect metrics successfully.

... Create or identify a user that you want to use to collect the metrics.
+
--
TIP: There is a `remote_monitoring_user` built-in user that grants the privileges
necessary for {metricbeat} to monitor {stack} products. See
{stack-ov}/built-in-users.html[Built-in users].

Alternatively, you can choose a different user and give them the
`remote_monitoring_collector` {stack-ov}/built-in-roles.html[built-in role].
--

... Add the `username` and `password` settings to the {es} module configuration
file.
+
--
For example, add the following settings in the `modules.d/elasticsearch.yml` file:

[source,yaml]
----------------------------------
- module: elasticsearch
  ...
  username: remote_monitoring_user
  password: YOUR_PASSWORD
----------------------------------
--

.. If you configured {es} to use <<configuring-tls,encrypted communications>>,
you must access it via HTTPS. For example, use a `hosts` setting like
`https://localhost:9200` in the `modules.d/elasticsearch.yml` file.

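For context (an illustrative sketch, not part of the diff): a TLS-enabled module configuration could combine the HTTPS host with Metricbeat's standard SSL options; the metricset list and the certificate path below are placeholders.

[source,yaml]
----------------------------------
- module: elasticsearch
  metricsets: ["node_stats"]
  period: 10s
  hosts: ["https://localhost:9200"]
  xpack.enabled: true
  ssl.certificate_authorities: ["/path/to/ca.crt"]
----------------------------------
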
.. Identify where to send the monitoring data. +
+
--
TIP: In production environments, we strongly recommend using a separate cluster
(referred to as the _monitoring cluster_) to store the data. Using a separate
monitoring cluster prevents production cluster outages from impacting your
ability to access your monitoring data. It also prevents monitoring activities
from impacting the performance of your production cluster.

For example, specify the {es} output information in the {metricbeat}
configuration file (`metricbeat.yml`):

[source,yaml]
----------------------------------
output.elasticsearch:
hosts: ["http://es-mon-1:9200", "http://es-mon2:9200"] <1>
----------------------------------
<1> In this example, the data is stored on a monitoring cluster with nodes
`es-mon-1` and `es-mon-2`.

For more information about these configuration options, see
{metricbeat-ref}/elasticsearch-output.html[Configure the {es} output].
--

.. If {security} is enabled on the monitoring cluster, you must provide a valid
user ID and password so that {metricbeat} can send metrics successfully.

... Create or identify a user that you want to use to send the metrics.
+
--
TIP: There is a `remote_monitoring_user` built-in user that grants the privileges
necessary for {metricbeat} to monitor {stack} products. See
{stack-ov}/built-in-users.html[Built-in users].

Alternatively, you can choose a different user and give them the
`remote_monitoring_agent` {stack-ov}/built-in-roles.html[built-in role].
--

... Add the `username` and `password` settings to the {es} output information in
the {metricbeat} configuration file (`metricbeat.yml`):
+
--
[source,yaml]
----------------------------------
output.elasticsearch:
  ...
  username: remote_monitoring_user
  password: YOUR_PASSWORD
----------------------------------
--

.. If you configured the monitoring cluster to use
<<configuring-tls,encrypted communications>>, you must access it via
HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200` in the
`metricbeat.yml` file.

. <<starting-elasticsearch,Start {es}>>.

. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}].

. {kibana-ref}/monitoring-data.html[View the monitoring data in {kib}].
@@ -6,18 +6,27 @@
<titleabbrev>Configuring monitoring</titleabbrev>
++++

-By default, {monitoring} is enabled but data collection is disabled. Advanced
-monitoring settings enable you to control how frequently data is collected,
-configure timeouts, and set the retention period for locally-stored monitoring
-indices. You can also adjust how monitoring data is displayed.
+If you enable the collection of monitoring data in your cluster, you can
+optionally collect metrics about {es}. By default, {monitoring} is enabled but
+data collection is disabled.
+
+The following method involves sending the metrics to the monitoring cluster by
+using exporters. For an alternative method, see <<configuring-metricbeat>>.
+
+Advanced monitoring settings enable you to control how frequently data is
+collected, configure timeouts, and set the retention period for locally-stored
+monitoring indices. You can also adjust how monitoring data is displayed.

To learn about monitoring in general, see
{stack-ov}/xpack-monitoring.html[Monitoring the {stack}].

. To collect monitoring data about your {es} cluster:

.. Verify that the `xpack.monitoring.enabled`,
`xpack.monitoring.collection.enabled`, and
`xpack.monitoring.elasticsearch.collection.enabled` settings are `true` on each
-node in the cluster. By default xpack.monitoring.collection.enabled is disabled
-(`false`), and that overrides xpack.monitoring.elasticsearch.collection.enabled,
+node in the cluster. By default `xpack.monitoring.collection.enabled` is disabled
+(`false`), and that overrides `xpack.monitoring.elasticsearch.collection.enabled`,
which defaults to being enabled (`true`). Both settings can be set dynamically
at runtime. For more information, see <<monitoring-settings>>.
@@ -69,8 +78,9 @@ see {stack-ov}/how-monitoring-works.html[How Monitoring Works].
a dedicated monitoring cluster:

.. Create a user on the monitoring cluster that has the
-{xpack-ref}/built-in-roles.html#built-in-roles-remote-monitoring-agent[`remote_monitoring_agent` built-in role]. For example, the following request
-creates a `remote_monitor` user that has the `remote_monitoring_agent` role:
+{stack-ov}/built-in-roles.html#built-in-roles-remote-monitoring-agent[`remote_monitoring_agent` built-in role].
+For example, the following request creates a `remote_monitor` user that has the
+`remote_monitoring_agent` role:
+
--
[source, sh]
@@ -87,12 +97,17 @@ POST /_xpack/security/user/remote_monitor
--

.. On each node in the cluster that is being monitored, configure the `http`
-exporter to use the appropriate credentials when data is shipped to the monitoring cluster.
+exporter to use the appropriate credentials when data is shipped to the
+monitoring cluster.
+
--
-If SSL/TLS is enabled on the monitoring cluster, you must use the HTTPS protocol in the `host` setting. You must also include the CA certificate in each node's trusted certificates in order to verify the identities of the nodes in the monitoring cluster.
+If SSL/TLS is enabled on the monitoring cluster, you must use the HTTPS protocol
+in the `host` setting. You must also include the CA certificate in each node's
+trusted certificates in order to verify the identities of the nodes in the
+monitoring cluster.

-The following example specifies the location of the PEM encoded certificate with the `certificate_authorities` setting:
+The following example specifies the location of the PEM encoded certificate with
+the `certificate_authorities` setting:

[source,yaml]
--------------------------------------------------
@@ -144,5 +159,8 @@ stored, that is to say the monitoring cluster. To grant all of the necessary per
. Optional:
<<config-monitoring-indices,Configure the indices that store the monitoring data>>.

. {kibana-ref}/monitoring-data.html[View the monitoring data in {kib}].
+
+include::configuring-metricbeat.asciidoc[]
include::indices.asciidoc[]
+include::{es-repo-dir}/settings/monitoring-settings.asciidoc[]
monitoring/images/metricbeat.png: new binary file (78 KiB)
@@ -1,7 +1,7 @@
[role="xpack"]
[testenv="basic"]
[[config-monitoring-indices]]
-=== Configuring Indices for Monitoring
+=== Configuring indices for monitoring

<<indices-templates,Index templates>> are used to configure the indices
that store the monitoring data collected from a cluster.
@@ -109,7 +109,9 @@ request) tells Elasticsearch how long it should keep the search context alive.
Its value (e.g. `1m`, see <<time-units>>) does not need to be long enough to
process all data -- it just needs to be long enough to process the previous
batch of results. Each `scroll` request (with the `scroll` parameter) sets a
-new expiry time.
+new expiry time. If a `scroll` request doesn't pass in the `scroll`
+parameter, then the search context will be freed as part of _that_ `scroll`
+request.

Normally, the background merge process optimizes the
index by merging together smaller segments to create new bigger segments, at
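For context (an illustrative sketch, not part of the diff): a scroll request that renews the context for another minute passes the `scroll` parameter alongside the scroll ID; omitting `"scroll": "1m"` below would, per the updated text, free the context once this batch is returned. The `scroll_id` value is a placeholder.

[source,js]
----
POST /_search/scroll
{
  "scroll": "1m",
  "scroll_id": "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ=="
}
----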
@@ -1,8 +1,8 @@
[role="xpack"]
[[monitoring-settings]]
-=== Monitoring Settings in Elasticsearch
+=== Monitoring settings in Elasticsearch
++++
-<titleabbrev>Monitoring Settings</titleabbrev>
+<titleabbrev>Monitoring settings</titleabbrev>
++++

By default, monitoring is enabled but data collection is disabled. To enable
@@ -43,17 +43,14 @@ to `true`. Its default value is `false`.
The `xpack.monitoring.collection` settings control how data is collected from
your Elasticsearch nodes.

-`xpack.monitoring.collection.enabled`::
+`xpack.monitoring.collection.enabled`:: (<<cluster-update-settings,Dynamic>>)

added[6.3.0] Set to `true` to enable the collection of monitoring data. When
this setting is `false` (default), {es} monitoring data is not collected and
all monitoring data from other sources such as {kib}, Beats, and Logstash is
ignored.
-+
-You can update this setting through the
-<<cluster-update-settings,Cluster Update Settings API>>.

-`xpack.monitoring.collection.interval`::
+`xpack.monitoring.collection.interval`:: (<<cluster-update-settings,Dynamic>>)

Setting to `-1` to disable data collection is no longer supported beginning with
7.0.0. deprecated[6.3.0, Use `xpack.monitoring.collection.enabled` set to

@@ -62,35 +59,26 @@ Setting to `-1` to disable data collection is no longer supported beginning with
Controls how often data samples are collected. Defaults to `10s`. If you
modify the collection interval, set the `xpack.monitoring.min_interval_seconds`
option in `kibana.yml` to the same value.
-+
-You can update this setting through the
-<<cluster-update-settings,Cluster Update Settings API>>.

-`xpack.monitoring.elasticsearch.collection.enabled`::
+`xpack.monitoring.elasticsearch.collection.enabled`:: (<<cluster-update-settings,Dynamic>>)

Controls whether statistics about your {es} cluster should be collected. Defaults to `true`.
This is different from xpack.monitoring.collection.enabled, which allows you to enable or disable
all monitoring collection. However, this setting simply disables the collection of Elasticsearch
data while still allowing other data (e.g., Kibana, Logstash, Beats, or APM Server monitoring data)
to pass through this cluster.
-+
-You can update this setting through the
-<<cluster-update-settings,Cluster Update Settings API>>.

`xpack.monitoring.collection.cluster.stats.timeout`::

Sets the timeout for collecting the cluster statistics. Defaults to `10s`.

-`xpack.monitoring.collection.indices`::
+`xpack.monitoring.collection.indices`:: (<<cluster-update-settings,Dynamic>>)

Controls which indices Monitoring collects data from. Defaults to all indices. Specify the index names
as a comma-separated list, for example `test1,test2,test3`. Names can include wildcards, for
example `test*`. You can explicitly include or exclude indices by prepending
`+` to include the index, or `-` to exclude the index. For example, to include all indices that
start with `test` except `test3`, you could specify `+test*,-test3`.
-+
-You can update this setting through the
-<<cluster-update-settings,Cluster Update Settings API>>.

`xpack.monitoring.collection.index.stats.timeout`::

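For context (an illustrative example, not part of the diff): because these settings are marked dynamic, they can be changed with the cluster update settings API, e.g. to apply the `+test*,-test3` index filter mentioned above:

[source,js]
----
PUT _cluster/settings
{
  "persistent": {
    "xpack.monitoring.collection.indices": "+test*,-test3"
  }
}
----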
@@ -324,9 +324,31 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
            switch (token) {
                case START_OBJECT:
                    parseValue(parser, fieldParser, currentFieldName, value, context);
+                    /*
+                     * Well behaving parsers should consume the entire object but
+                     * asserting that they do that is not something we can do
+                     * efficiently here. Instead we can check that they end on an
+                     * END_OBJECT. They could end on the *wrong* end object and
+                     * this test won't catch them, but that is the price that we pay
+                     * for having a cheap test.
+                     */
+                    if (parser.currentToken() != XContentParser.Token.END_OBJECT) {
+                        throw new IllegalStateException("parser for [" + currentFieldName + "] did not end on END_OBJECT");
+                    }
                    break;
                case START_ARRAY:
                    parseArray(parser, fieldParser, currentFieldName, value, context);
+                    /*
+                     * Well behaving parsers should consume the entire array but
+                     * asserting that they do that is not something we can do
+                     * efficiently here. Instead we can check that they end on an
+                     * END_ARRAY. They could end on the *wrong* end array and
+                     * this test won't catch them, but that is the price that we pay
+                     * for having a cheap test.
+                     */
+                    if (parser.currentToken() != XContentParser.Token.END_ARRAY) {
+                        throw new IllegalStateException("parser for [" + currentFieldName + "] did not end on END_ARRAY");
+                    }
                    break;
                case END_OBJECT:
                case END_ARRAY:
@@ -34,6 +34,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.hasSize;
@@ -650,6 +651,49 @@ public class ObjectParserTests extends ESTestCase {
        assertThat(ex.getMessage(), containsString("[foo] failed to parse field [int_array]"));
    }

+    public void testNoopDeclareObject() throws IOException {
+        ObjectParser<AtomicReference<String>, Void> parser = new ObjectParser<>("noopy", AtomicReference::new);
+        parser.declareString(AtomicReference::set, new ParseField("body"));
+        parser.declareObject((a,b) -> {}, (p, c) -> null, new ParseField("noop"));
+
+        assertEquals("i", parser.parse(createParser(JsonXContent.jsonXContent, "{\"body\": \"i\"}"), null).get());
+        Exception garbageException = expectThrows(IllegalStateException.class, () -> parser.parse(
+                createParser(JsonXContent.jsonXContent, "{\"noop\": {\"garbage\": \"shouldn't\"}}"),
+                null));
+        assertEquals("parser for [noop] did not end on END_OBJECT", garbageException.getMessage());
+        Exception sneakyException = expectThrows(IllegalStateException.class, () -> parser.parse(
+                createParser(JsonXContent.jsonXContent, "{\"noop\": {\"body\": \"shouldn't\"}}"),
+                null));
+        assertEquals("parser for [noop] did not end on END_OBJECT", sneakyException.getMessage());
+    }
+
+    public void testNoopDeclareField() throws IOException {
+        ObjectParser<AtomicReference<String>, Void> parser = new ObjectParser<>("noopy", AtomicReference::new);
+        parser.declareString(AtomicReference::set, new ParseField("body"));
+        parser.declareField((a,b) -> {}, (p, c) -> null, new ParseField("noop"), ValueType.STRING_ARRAY);
+
+        assertEquals("i", parser.parse(createParser(JsonXContent.jsonXContent, "{\"body\": \"i\"}"), null).get());
+        Exception e = expectThrows(IllegalStateException.class, () -> parser.parse(
+                createParser(JsonXContent.jsonXContent, "{\"noop\": [\"ignored\"]}"),
+                null));
+        assertEquals("parser for [noop] did not end on END_ARRAY", e.getMessage());
+    }
+
+    public void testNoopDeclareObjectArray() throws IOException {
+        ObjectParser<AtomicReference<String>, Void> parser = new ObjectParser<>("noopy", AtomicReference::new);
+        parser.declareString(AtomicReference::set, new ParseField("body"));
+        parser.declareObjectArray((a,b) -> {}, (p, c) -> null, new ParseField("noop"));
+
+        XContentParseException garbageError = expectThrows(XContentParseException.class, () -> parser.parse(
+                createParser(JsonXContent.jsonXContent, "{\"noop\": [{\"garbage\": \"shouldn't\"}}]"),
+                null));
+        assertEquals("expected value but got [FIELD_NAME]", garbageError.getCause().getMessage());
+        XContentParseException sneakyError = expectThrows(XContentParseException.class, () -> parser.parse(
+                createParser(JsonXContent.jsonXContent, "{\"noop\": [{\"body\": \"shouldn't\"}}]"),
+                null));
+        assertEquals("expected value but got [FIELD_NAME]", sneakyError.getCause().getMessage());
+    }
+
    static class NamedObjectHolder {
        public static final ObjectParser<NamedObjectHolder, Void> PARSER = new ObjectParser<>("named_object_holder",
                NamedObjectHolder::new);
@@ -73,7 +73,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
            }
        };
        MockTransportService mockTransportService =
-            MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, Collections.emptySet());
+            MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet());
        mockTransportService.start();
        return mockTransportService;
    }
@@ -77,7 +77,7 @@ public class SimpleNioTransportTests extends AbstractSimpleTransportTestCase {
            }
        };
        MockTransportService mockTransportService =
-            MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, Collections.emptySet());
+            MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet());
        mockTransportService.start();
        return mockTransportService;
    }
@@ -67,7 +67,7 @@ public class ConnectionManager implements Closeable {
    private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener();

    public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool) {
-        this(settings, transport, threadPool, buildDefaultConnectionProfile(settings));
+        this(settings, transport, threadPool, ConnectionProfile.buildDefaultConnectionProfile(settings));
    }

    public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool, ConnectionProfile defaultProfile) {
@@ -323,23 +323,4 @@ public class ConnectionManager implements Closeable {
            }
        }
    }
-
-    public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) {
-        int connectionsPerNodeRecovery = TransportService.CONNECTIONS_PER_NODE_RECOVERY.get(settings);
-        int connectionsPerNodeBulk = TransportService.CONNECTIONS_PER_NODE_BULK.get(settings);
-        int connectionsPerNodeReg = TransportService.CONNECTIONS_PER_NODE_REG.get(settings);
-        int connectionsPerNodeState = TransportService.CONNECTIONS_PER_NODE_STATE.get(settings);
-        int connectionsPerNodePing = TransportService.CONNECTIONS_PER_NODE_PING.get(settings);
-        ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
-        builder.setConnectTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings));
-        builder.setHandshakeTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings));
-        builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK);
-        builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING);
-        // if we are not master eligible we don't need a dedicated channel to publish the state
-        builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE);
-        // if we are not a data-node we don't need any dedicated channels for recovery
-        builder.addConnections(DiscoveryNode.isDataNode(settings) ? connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY);
-        builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG);
-        return builder.build();
-    }
}
@@ -18,7 +18,9 @@
 */
package org.elasticsearch.transport;

+import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

import java.util.ArrayList;
@@ -91,6 +93,31 @@ public final class ConnectionProfile {
        }
    }

+    /**
+     * Builds a default connection profile based on the provided settings.
+     *
+     * @param settings to build the connection profile from
+     * @return the connection profile
+     */
+    public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) {
+        int connectionsPerNodeRecovery = TransportService.CONNECTIONS_PER_NODE_RECOVERY.get(settings);
+        int connectionsPerNodeBulk = TransportService.CONNECTIONS_PER_NODE_BULK.get(settings);
+        int connectionsPerNodeReg = TransportService.CONNECTIONS_PER_NODE_REG.get(settings);
+        int connectionsPerNodeState = TransportService.CONNECTIONS_PER_NODE_STATE.get(settings);
+        int connectionsPerNodePing = TransportService.CONNECTIONS_PER_NODE_PING.get(settings);
+        Builder builder = new Builder();
+        builder.setConnectTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings));
+        builder.setHandshakeTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings));
+        builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK);
+        builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING);
+        // if we are not master eligible we don't need a dedicated channel to publish the state
+        builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE);
+        // if we are not a data-node we don't need any dedicated channels for recovery
+        builder.addConnections(DiscoveryNode.isDataNode(settings) ? connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY);
+        builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG);
+        return builder.build();
+    }
+
    /**
     * A builder to build a new {@link ConnectionProfile}
     */
@@ -64,7 +64,7 @@ public class ConnectionManagerTests extends ESTestCase {
    }

    public void testConnectionProfileResolve() {
-        final ConnectionProfile defaultProfile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY);
+        final ConnectionProfile defaultProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY);
        assertEquals(defaultProfile, ConnectionProfile.resolveConnectionProfile(null, defaultProfile));

        final ConnectionProfile.Builder builder = new ConnectionProfile.Builder();

@@ -96,7 +96,7 @@ public class ConnectionManagerTests extends ESTestCase {
    }

    public void testDefaultConnectionProfile() {
-        ConnectionProfile profile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY);
+        ConnectionProfile profile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY);
        assertEquals(13, profile.getNumConnections());
        assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING));
        assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG));

@@ -104,7 +104,7 @@ public class ConnectionManagerTests extends ESTestCase {
        assertEquals(2, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY));
        assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK));

-        profile = ConnectionManager.buildDefaultConnectionProfile(Settings.builder().put("node.master", false).build());
+        profile = ConnectionProfile.buildDefaultConnectionProfile(Settings.builder().put("node.master", false).build());
        assertEquals(12, profile.getNumConnections());
        assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING));
        assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG));

@@ -112,7 +112,7 @@ public class ConnectionManagerTests extends ESTestCase {
        assertEquals(2, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY));
        assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK));

-        profile = ConnectionManager.buildDefaultConnectionProfile(Settings.builder().put("node.data", false).build());
+        profile = ConnectionProfile.buildDefaultConnectionProfile(Settings.builder().put("node.data", false).build());
        assertEquals(11, profile.getNumConnections());
        assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING));
        assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG));

@@ -120,7 +120,7 @@ public class ConnectionManagerTests extends ESTestCase {
        assertEquals(0, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY));
        assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK));

-        profile = ConnectionManager.buildDefaultConnectionProfile(Settings.builder().put("node.data", false)
+        profile = ConnectionProfile.buildDefaultConnectionProfile(Settings.builder().put("node.data", false)
            .put("node.master", false).build());
        assertEquals(10, profile.getNumConnections());
        assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING));
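For context (a worked check, not part of the diff): the totals asserted above follow from the per-type defaults used by `buildDefaultConnectionProfile`: 1 (ping) + 6 (reg) + 1 (state) + 2 (recovery) + 3 (bulk) = 13. Dropping master eligibility removes the state channel (12), dropping the data role removes the two recovery channels (11), and dropping both leaves 10.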
@@ -26,6 +26,7 @@ import org.apache.http.message.BasicHeader;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.apache.http.ssl.SSLContexts;
import org.apache.http.util.EntityUtils;
+import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
@@ -73,6 +74,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;

@@ -105,25 +107,13 @@ public abstract class ESRestTestCase extends ESTestCase {
    }

    /**
-     * Does the cluster being tested have xpack installed?
+     * Does any node in the cluster being tested have x-pack installed?
     */
    public static boolean hasXPack() throws IOException {
-        RestClient client = adminClient();
-        if (client == null) {
+        if (hasXPack == null) {
            throw new IllegalStateException("must be called inside of a rest test case test");
        }
-        Map<?, ?> response = entityAsMap(client.performRequest(new Request("GET", "_nodes/plugins")));
-        Map<?, ?> nodes = (Map<?, ?>) response.get("nodes");
-        for (Map.Entry<?, ?> node : nodes.entrySet()) {
-            Map<?, ?> nodeInfo = (Map<?, ?>) node.getValue();
-            for (Object module: (List<?>) nodeInfo.get("modules")) {
-                Map<?, ?> moduleInfo = (Map<?, ?>) module;
-                if (moduleInfo.get("name").toString().startsWith("x-pack-")) {
-                    return true;
-                }
-            }
-        }
-        return false;
+        return hasXPack;
    }

    private static List<HttpHost> clusterHosts;
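For context (an illustrative sketch, not part of the diff): the `_nodes/plugins` response that `initClient()` now walks once per suite has roughly this shape; the node ID, version, and module names are placeholders.

[source,js]
----
{
  "nodes": {
    "oTUltX4IQMOUUVeiohTt8A": {
      "version": "6.5.0",
      "modules": [
        { "name": "x-pack-core" },
        { "name": "lang-painless" }
      ]
    }
  }
}
----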
@@ -136,12 +126,16 @@ public abstract class ESRestTestCase extends ESTestCase {
     * completes
     */
    private static RestClient adminClient;
+    private static Boolean hasXPack;
+    private static TreeSet<Version> nodeVersions;

    @Before
    public void initClient() throws IOException {
        if (client == null) {
            assert adminClient == null;
            assert clusterHosts == null;
+            assert hasXPack == null;
+            assert nodeVersions == null;
            String cluster = System.getProperty("tests.rest.cluster");
            if (cluster == null) {
                throw new RuntimeException("Must specify [tests.rest.cluster] system property with a comma delimited list of [host:port] "
@@ -162,10 +156,27 @@ public abstract class ESRestTestCase extends ESTestCase {
            logger.info("initializing REST clients against {}", clusterHosts);
            client = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()]));
            adminClient = buildClient(restAdminSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()]));
+
+            hasXPack = false;
+            nodeVersions = new TreeSet<>();
+            Map<?, ?> response = entityAsMap(adminClient.performRequest(new Request("GET", "_nodes/plugins")));
+            Map<?, ?> nodes = (Map<?, ?>) response.get("nodes");
+            for (Map.Entry<?, ?> node : nodes.entrySet()) {
+                Map<?, ?> nodeInfo = (Map<?, ?>) node.getValue();
+                nodeVersions.add(Version.fromString(nodeInfo.get("version").toString()));
+                for (Object module: (List<?>) nodeInfo.get("modules")) {
+                    Map<?, ?> moduleInfo = (Map<?, ?>) module;
+                    if (moduleInfo.get("name").toString().startsWith("x-pack-")) {
+                        hasXPack = true;
+                    }
+                }
+            }
        }
        assert client != null;
        assert adminClient != null;
        assert clusterHosts != null;
+        assert hasXPack != null;
+        assert nodeVersions != null;
    }

    /**
@@ -195,6 +206,8 @@ public abstract class ESRestTestCase extends ESTestCase {
            clusterHosts = null;
            client = null;
            adminClient = null;
+            hasXPack = null;
+            nodeVersions = null;
        }
    }

@@ -335,8 +348,6 @@ public abstract class ESRestTestCase extends ESTestCase {
    }

    private void wipeCluster() throws Exception {
-        boolean hasXPack = hasXPack();
-
        if (preserveIndicesUponCompletion() == false) {
            // wipe indices
            try {
@@ -116,7 +116,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
    protected abstract MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake);

    protected int channelsPerNodeConnection() {
-        return 13;
+        // This is a customized profile for this test case.
+        return 6;
    }

    @Override
@@ -125,9 +126,17 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
        super.setUp();
        threadPool = new TestThreadPool(getClass().getName());
        clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-        serviceA = buildService("TS_A", version0, clusterSettings); // this one supports dynamic tracer updates
+        Settings connectionSettings = Settings.builder()
+            .put(TransportService.CONNECTIONS_PER_NODE_RECOVERY.getKey(), 1)
+            .put(TransportService.CONNECTIONS_PER_NODE_BULK.getKey(), 1)
+            .put(TransportService.CONNECTIONS_PER_NODE_REG.getKey(), 2)
+            .put(TransportService.CONNECTIONS_PER_NODE_STATE.getKey(), 1)
+            .put(TransportService.CONNECTIONS_PER_NODE_PING.getKey(), 1)
+            .build();
+
+        serviceA = buildService("TS_A", version0, clusterSettings, connectionSettings); // this one supports dynamic tracer updates
        nodeA = serviceA.getLocalNode();
-        serviceB = buildService("TS_B", version1, null); // this one doesn't support dynamic tracer updates
+        serviceB = buildService("TS_B", version1, null, connectionSettings); // this one doesn't support dynamic tracer updates
        nodeB = serviceB.getLocalNode();
        // wait till all nodes are properly connected and the event has been sent, so tests in this class
        // will not get this callback called on the connections done in this setup
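For context (a worked check, not part of the diff): with the `connectionSettings` above, the expected per-node channel count is 1 (recovery) + 1 (bulk) + 2 (reg) + 1 (state) + 1 (ping) = 6, which is exactly what the updated `channelsPerNodeConnection()` override returns.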
@@ -174,7 +183,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
    }

    protected MockTransportService buildService(final String name, final Version version, ClusterSettings clusterSettings) {
-        return buildService(name, version, clusterSettings, Settings.EMPTY, true, true);
+        return buildService(name, version, clusterSettings, Settings.EMPTY);
+    }
+
+    protected MockTransportService buildService(final String name, final Version version, ClusterSettings clusterSettings,
+                                                Settings settings) {
+        return buildService(name, version, clusterSettings, settings, true, true);
    }

    @Override
@@ -1999,7 +2013,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
        assertEquals("handshake failed", exception.getCause().getMessage());
    }

-    ConnectionProfile connectionProfile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY);
+    ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY);
    try (TransportService service = buildService("TS_TPC", Version.CURRENT, null);
         TcpTransport.NodeChannels connection = originalTransport.openConnection(
             new DiscoveryNode("TS_TPC", "TS_TPC", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0),
@@ -50,7 +50,7 @@ public class MockTcpTransportTests extends AbstractSimpleTransportTestCase {
            }
        };
        MockTransportService mockTransportService =
-            MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, Collections.emptySet());
+            MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet());
        mockTransportService.start();
        return mockTransportService;
    }
@@ -78,7 +78,7 @@ public class SimpleMockNioTransportTests extends AbstractSimpleTransportTestCase

        };
        MockTransportService mockTransportService =
-            MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, Collections.emptySet());
+            MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet());
        mockTransportService.start();
        return mockTransportService;
    }
@@ -21,7 +21,6 @@ import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.AbstractSimpleTransportTestCase;
import org.elasticsearch.transport.BindTransportException;
import org.elasticsearch.transport.ConnectTransportException;
-import org.elasticsearch.transport.ConnectionManager;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportRequestOptions;
@@ -111,7 +110,7 @@ public abstract class AbstractSimpleSecurityTransportTestCase extends AbstractSi
        assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport);
        TcpTransport originalTransport = (TcpTransport) serviceA.getOriginalTransport();

-        ConnectionProfile connectionProfile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY);
+        ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY);
        try (TransportService service = buildService("TS_TPC", Version.CURRENT, null);
             TcpTransport.NodeChannels connection = originalTransport.openConnection(
                 new DiscoveryNode("TS_TPC", "TS_TPC", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0),
@@ -93,7 +93,7 @@ public class SimpleSecurityNetty4ServerTransportTests extends AbstractSimpleSecu

        };
        MockTransportService mockTransportService =
-            MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings,
+            MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings,
                Collections.emptySet());
        mockTransportService.start();
        return mockTransportService;
@@ -55,7 +55,7 @@ public class SimpleSecurityNioTransportTests extends AbstractSimpleSecurityTrans

        };
        MockTransportService mockTransportService =
-            MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings,
+            MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings,
                Collections.emptySet());
        mockTransportService.start();
        return mockTransportService;
@@ -25,6 +25,7 @@ import org.elasticsearch.xpack.sql.plan.logical.Distinct;
import org.elasticsearch.xpack.sql.plan.logical.Filter;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.logical.OrderBy;
+import org.elasticsearch.xpack.sql.plan.logical.Project;
import org.elasticsearch.xpack.sql.tree.Node;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.util.StringUtils;
@@ -238,8 +239,17 @@ final class Verifier {
            Set<LogicalPlan> groupingFailures, Map<String, Function> functions) {
        if (p instanceof OrderBy) {
            OrderBy o = (OrderBy) p;
-            if (o.child() instanceof Aggregate) {
-                Aggregate a = (Aggregate) o.child();
+            LogicalPlan child = o.child();
+
+            if (child instanceof Project) {
+                child = ((Project) child).child();
+            }
+            if (child instanceof Filter) {
+                child = ((Filter) child).child();
+            }
+
+            if (child instanceof Aggregate) {
+                Aggregate a = (Aggregate) child;

                Map<Expression, Node<?>> missing = new LinkedHashMap<>();
                o.order().forEach(oe -> {
@@ -253,7 +263,7 @@ final class Verifier {
                // take aliases declared inside the aggregates which point to the grouping (but are not included in there)
                // to correlate them to the order
                List<Expression> groupingAndMatchingAggregatesAliases = new ArrayList<>(a.groupings());
-
+
                a.aggregates().forEach(as -> {
                    if (as instanceof Alias) {
                        Alias al = (Alias) as;
@@ -262,10 +272,13 @@ final class Verifier {
                        }
                    }
                });
-
-                // make sure to compare attributes directly
-                if (Expressions.anyMatch(groupingAndMatchingAggregatesAliases,
-                    g -> e.semanticEquals(e instanceof Attribute ? Expressions.attribute(g) : g))) {
+
+                // Make sure you can apply functions on top of the grouped by expressions in the ORDER BY:
+                // e.g.: if "GROUP BY f2(f1(field))" you can "ORDER BY f4(f3(f2(f1(field))))"
+                //
+                // Also, make sure to compare attributes directly
+                if (e.anyMatch(expression -> Expressions.anyMatch(groupingAndMatchingAggregatesAliases,
+                    g -> expression.semanticEquals(expression instanceof Attribute ? Expressions.attribute(g) : g)))) {
                    return;
                }

@@ -288,7 +301,6 @@ final class Verifier {
        return true;
    }

-
    private static boolean checkGroupByHaving(LogicalPlan p, Set<Failure> localFailures,
            Set<LogicalPlan> groupingFailures, Map<String, Function> functions) {
        if (p instanceof Filter) {
@@ -118,6 +118,11 @@ public class VerifierErrorMessagesTests extends ESTestCase {
            verify("SELECT MAX(int) FROM test GROUP BY text ORDER BY bool"));
    }

+    public void testGroupByOrderByNonGrouped_WithHaving() {
+        assertEquals("1:71: Cannot order by non-grouped column [bool], expected [text]",
+            verify("SELECT MAX(int) FROM test GROUP BY text HAVING MAX(int) > 10 ORDER BY bool"));
+    }
+
    public void testGroupByOrderByAliasedInSelectAllowed() {
        LogicalPlan lp = accepted("SELECT text t FROM test GROUP BY text ORDER BY t");
        assertNotNull(lp);
@@ -128,6 +133,11 @@ public class VerifierErrorMessagesTests extends ESTestCase {
            verify("SELECT MAX(int) FROM test GROUP BY text ORDER BY YEAR(date)"));
    }

+    public void testGroupByOrderByScalarOverNonGrouped_WithHaving() {
+        assertEquals("1:71: Cannot order by non-grouped column [YEAR(date [UTC])], expected [text]",
+            verify("SELECT MAX(int) FROM test GROUP BY text HAVING MAX(int) > 10 ORDER BY YEAR(date)"));
+    }
+
    public void testGroupByHavingNonGrouped() {
        assertEquals("1:48: Cannot filter by non-grouped column [int], expected [text]",
            verify("SELECT AVG(int) FROM test GROUP BY text HAVING int > 10"));
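For context (an illustrative sketch, not part of the diff): the Verifier change above unwraps a Project and a Filter (i.e. a HAVING clause) sitting between the ORDER BY and the underlying Aggregate, so ordering by a function applied to a grouped column is accepted while ordering by a non-grouped column still fails, e.g. against the same test schema:

[source,sql]
----
-- accepted: the ORDER BY expression is a function of the grouped column
SELECT MAX(int) FROM test GROUP BY text HAVING MAX(int) > 10 ORDER BY LENGTH(text);

-- rejected: bool is not part of the GROUP BY (see the tests above)
SELECT MAX(int) FROM test GROUP BY text HAVING MAX(int) > 10 ORDER BY bool;
----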