From 8e33a5292aa73380f1f6cb9359368783e50f1715 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Thu, 9 May 2019 12:59:45 +0200 Subject: [PATCH 01/67] Add HTML strip processor (#41888) This processor uses the lucene HTMLStripCharFilter class to remove HTML entities from a field. This adds to the char filter, so that there is possibility to store the stripped version as well. Note, that the characeter filter replaces tags with a newline, so that the produced HTML will look slightly different than the incoming HTML with regards to newlines. --- docs/reference/ingest/ingest-node.asciidoc | 1 + .../ingest/processors/html_strip.asciidoc | 26 +++++++ .../ingest/common/HtmlStripProcessor.java | 76 +++++++++++++++++++ .../ingest/common/IngestCommonPlugin.java | 1 + .../HtmlStripProcessorFactoryTests.java | 27 +++++++ .../common/HtmlStripProcessorTests.java | 38 ++++++++++ .../rest-api-spec/test/ingest/10_basic.yml | 1 + .../rest-api-spec/test/ingest/40_mutate.yml | 9 ++- 8 files changed, 178 insertions(+), 1 deletion(-) create mode 100644 docs/reference/ingest/processors/html_strip.asciidoc create mode 100644 modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/HtmlStripProcessor.java create mode 100644 modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorFactoryTests.java create mode 100644 modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorTests.java diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 3c8d8e9abf2..1f8abc5675d 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -864,6 +864,7 @@ include::processors/foreach.asciidoc[] include::processors/geoip.asciidoc[] include::processors/grok.asciidoc[] include::processors/gsub.asciidoc[] +include::processors/html_strip.asciidoc[] include::processors/join.asciidoc[] include::processors/json.asciidoc[] include::processors/kv.asciidoc[] diff --git a/docs/reference/ingest/processors/html_strip.asciidoc b/docs/reference/ingest/processors/html_strip.asciidoc new file mode 100644 index 00000000000..2fa3cd7bbb8 --- /dev/null +++ b/docs/reference/ingest/processors/html_strip.asciidoc @@ -0,0 +1,26 @@ +[[htmlstrip-processor]] +=== HTML Strip Processor +Removes HTML from field. + +NOTE: Each HTML tag is replaced with a `\n` character. + +[[htmlstrip-options]] +.HTML Strip Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The string-valued field to remove HTML tags from +| `target_field` | no | `field` | The field to assign the value to, by default `field` is updated in-place +| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "html_strip": { + "field": "foo" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/HtmlStripProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/HtmlStripProcessor.java new file mode 100644 index 00000000000..aaeb5b3310b --- /dev/null +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/HtmlStripProcessor.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter; +import org.elasticsearch.ElasticsearchException; + +import java.io.IOException; +import java.io.StringReader; +import java.util.Map; + +public final class HtmlStripProcessor extends AbstractStringProcessor { + + public static final String TYPE = "html_strip"; + + HtmlStripProcessor(String tag, String field, boolean ignoreMissing, String targetField) { + super(tag, field, ignoreMissing, targetField); + } + + @Override + protected String process(String value) { + // shortcut, no need to create a string builder and go through each char + if (value.contains("<") == false || value.contains(">") == false) { + return value; + } + + HTMLStripCharFilter filter = new HTMLStripCharFilter(new StringReader(value)); + + StringBuilder builder = new StringBuilder(); + int ch; + try { + while ((ch = filter.read()) != -1) { + builder.append((char)ch); + } + } catch (IOException e) { + throw new ElasticsearchException(e); + } + + return builder.toString(); + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory extends AbstractStringProcessor.Factory { + + public Factory() { + super(TYPE); + } + + @Override + protected HtmlStripProcessor newProcessor(String tag, Map config, String field, + boolean ignoreMissing, String targetField) { + return new HtmlStripProcessor(tag, field, ignoreMissing, targetField); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java index a839e147c77..aa498f3eadd 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java @@ -87,6 +87,7 @@ public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPl processors.put(PipelineProcessor.TYPE, new PipelineProcessor.Factory(parameters.ingestService)); processors.put(DissectProcessor.TYPE, new DissectProcessor.Factory()); processors.put(DropProcessor.TYPE, new DropProcessor.Factory()); + processors.put(HtmlStripProcessor.TYPE, new HtmlStripProcessor.Factory()); return Collections.unmodifiableMap(processors); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorFactoryTests.java new file mode 100644 index 00000000000..ccadcd2770f --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorFactoryTests.java @@ -0,0 +1,27 @@ +/* + * Licensed to Elasticsearch under one or more 
contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +public class HtmlStripProcessorFactoryTests extends AbstractStringProcessorFactoryTestCase { + @Override + protected AbstractStringProcessor.Factory newFactory() { + return new HtmlStripProcessor.Factory(); + } +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorTests.java new file mode 100644 index 00000000000..79ccff84a72 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorTests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +public class HtmlStripProcessorTests extends AbstractStringProcessorTestCase { + + @Override + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + return new HtmlStripProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); + } + + @Override + protected String modifyInput(String input) { + return "
<p><b>test</b>" + input + "<p><b>
test"; + } + + @Override + protected String expectedResult(String input) { + return "\ntest" + input + "\ntest"; + } +} diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml index f83a9e78cb3..8a803eae1fc 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml @@ -23,6 +23,7 @@ - contains: { nodes.$master.ingest.processors: { type: foreach } } - contains: { nodes.$master.ingest.processors: { type: grok } } - contains: { nodes.$master.ingest.processors: { type: gsub } } + - contains: { nodes.$master.ingest.processors: { type: html_strip } } - contains: { nodes.$master.ingest.processors: { type: join } } - contains: { nodes.$master.ingest.processors: { type: json } } - contains: { nodes.$master.ingest.processors: { type: kv } } diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/40_mutate.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/40_mutate.yml index 11b6a64cd3f..9de9d19c0b8 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/40_mutate.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/40_mutate.yml @@ -76,6 +76,11 @@ teardown: "pattern" : "-", "replacement" : "." } + }, + { + "html_strip" : { + "field" : "field_to_html_strip" + } } ] } @@ -96,7 +101,8 @@ teardown: "field_to_split": "127-0-0-1", "field_to_join": ["127","0","0","1"], "field_to_convert": ["127","0","0","1"], - "field_to_gsub": "127-0-0-1" + "field_to_gsub": "127-0-0-1", + "field_to_html_strip": "
<p>
this is a test" } - do: @@ -114,6 +120,7 @@ teardown: - match: { _source.field_to_join: "127-0-0-1" } - match: { _source.field_to_convert: [127,0,0,1] } - match: { _source.field_to_gsub: "127.0.0.1" } + - match: { _source.field_to_html_strip: "\nthis \nis\n a test" } --- "Test metadata": From 309e4a11b5060e290e6b2ab0bce6e61a421b5cd5 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Thu, 9 May 2019 13:08:33 +0100 Subject: [PATCH 02/67] Cut AnalyzeResponse over to Writeable (#41915) This commit makes AnalyzeResponse and its various helper classes implement Writeable. The classes are also now immutable. Relates to #34389 --- .../admin/indices/analyze/AnalyzeAction.java | 8 +- .../indices/analyze/AnalyzeResponse.java | 101 +++++------ .../analyze/DetailAnalyzeResponse.java | 165 +++++++----------- .../analyze/TransportAnalyzeAction.java | 8 +- .../shard/TransportSingleShardAction.java | 21 ++- .../indices/analyze/AnalyzeResponseTests.java | 26 +-- 6 files changed, 156 insertions(+), 173 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java index e2bbd655992..3677cd6cb4e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.analyze; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class AnalyzeAction extends Action { @@ -30,8 +31,13 @@ public class AnalyzeAction extends Action { super(NAME); } + @Override + public Writeable.Reader getResponseReader() { + return AnalyzeResponse::new; + } + @Override public AnalyzeResponse newResponse() { - return new AnalyzeResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java index e571db951cb..945c2128bab 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -43,17 +43,14 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpect public class AnalyzeResponse extends ActionResponse implements Iterable, ToXContentObject { - public static class AnalyzeToken implements Streamable, ToXContentObject { - private String term; - private int startOffset; - private int endOffset; - private int position; - private int positionLength = 1; - private Map attributes; - private String type; - - AnalyzeToken() { - } + public static class AnalyzeToken implements Writeable, ToXContentObject { + private final String term; + private final int startOffset; + private final 
int endOffset; + private final int position; + private final int positionLength; + private final Map attributes; + private final String type; @Override public boolean equals(Object o) { @@ -85,6 +82,21 @@ public class AnalyzeResponse extends ActionResponse implements Iterable tokens; - - AnalyzeResponse() { - } + private final List tokens; public AnalyzeResponse(List tokens, DetailAnalyzeResponse detail) { this.tokens = tokens; this.detail = detail; } + public AnalyzeResponse(StreamInput in) throws IOException { + super.readFrom(in); + int size = in.readVInt(); + if (size > 0) { + tokens = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + tokens.add(new AnalyzeToken(in)); + } + } + else { + tokens = null; + } + detail = in.readOptionalWriteable(DetailAnalyzeResponse::new); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + public List getTokens() { return this.tokens; } @@ -268,20 +275,6 @@ public class AnalyzeResponse extends ActionResponse implements Iterable(size); - for (int i = 0; i < size; i++) { - tokens.add(AnalyzeToken.readAnalyzeToken(in)); - } - if (tokens.size() == 0) { - tokens = null; - } - detail = in.readOptionalStreamable(DetailAnalyzeResponse::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -293,7 +286,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable 0) { + charfilters = new CharFilteredText[size]; + for (int i = 0; i < size; i++) { + charfilters[i] = new CharFilteredText(in); + } + } + else { + charfilters = null; + } + size = in.readVInt(); + if (size > 0) { + tokenfilters = new AnalyzeTokenList[size]; + for (int i = 0; i < size; i++) { + tokenfilters[i] = new AnalyzeTokenList(in); + } + } + else { + tokenfilters = null; + } + analyzer = null; + } else { + analyzer = new AnalyzeTokenList(in); + tokenfilters = null; + tokenizer = null; + charfilters = null; + } } - public DetailAnalyzeResponse analyzer(AnalyzeTokenList analyzer) { - this.customAnalyzer = false; - this.analyzer = analyzer; - return this; + public AnalyzeTokenList analyzer() { + return this.analyzer; } public CharFilteredText[] charfilters() { return this.charfilters; } - public DetailAnalyzeResponse charfilters(CharFilteredText[] charfilters) { - this.customAnalyzer = true; - this.charfilters = charfilters; - return this; - } - public AnalyzeTokenList tokenizer() { return tokenizer; } - public DetailAnalyzeResponse tokenizer(AnalyzeTokenList tokenizer) { - this.customAnalyzer = true; - this.tokenizer = tokenizer; - return this; - } - public AnalyzeTokenList[] tokenfilters() { return tokenfilters; } - public DetailAnalyzeResponse tokenfilters(AnalyzeTokenList[] tokenfilters) { - this.customAnalyzer = true; - this.tokenfilters = tokenfilters; - return this; - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -201,30 +207,6 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { static final String TOKENFILTERS = "tokenfilters"; } - @Override - public void readFrom(StreamInput in) throws IOException { - this.customAnalyzer = in.readBoolean(); - if (customAnalyzer) { - tokenizer = AnalyzeTokenList.readAnalyzeTokenList(in); - int size = in.readVInt(); - if (size > 0) { - charfilters = new CharFilteredText[size]; - for (int i = 0; i < size; i++) { - charfilters[i] = CharFilteredText.readCharFilteredText(in); - } - } - size = 
in.readVInt(); - if (size > 0) { - tokenfilters = new AnalyzeTokenList[size]; - for (int i = 0; i < size; i++) { - tokenfilters[i] = AnalyzeTokenList.readAnalyzeTokenList(in); - } - } - } else { - analyzer = AnalyzeTokenList.readAnalyzeTokenList(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(customAnalyzer); @@ -251,9 +233,9 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { } } - public static class AnalyzeTokenList implements Streamable, ToXContentObject { - private String name; - private AnalyzeResponse.AnalyzeToken[] tokens; + public static class AnalyzeTokenList implements Writeable, ToXContentObject { + private final String name; + private final AnalyzeResponse.AnalyzeToken[] tokens; @Override public boolean equals(Object o) { @@ -271,14 +253,25 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { return result; } - AnalyzeTokenList() { - } - public AnalyzeTokenList(String name, AnalyzeResponse.AnalyzeToken[] tokens) { this.name = name; this.tokens = tokens; } + public AnalyzeTokenList(StreamInput in) throws IOException { + name = in.readString(); + int size = in.readVInt(); + if (size > 0) { + tokens = new AnalyzeResponse.AnalyzeToken[size]; + for (int i = 0; i < size; i++) { + tokens[i] = new AnalyzeResponse.AnalyzeToken(in); + } + } + else { + tokens = null; + } + } + public String getName() { return name; } @@ -287,12 +280,6 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { return tokens; } - public static AnalyzeTokenList readAnalyzeTokenList(StreamInput in) throws IOException { - AnalyzeTokenList list = new AnalyzeTokenList(); - list.readFrom(in); - return list; - } - XContentBuilder toXContentWithoutObject(XContentBuilder builder, Params params) throws IOException { builder.field(Fields.NAME, this.name); builder.startArray(AnalyzeResponse.Fields.TOKENS); @@ -327,18 +314,6 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { return PARSER.parse(parser, null); } - @Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - int size = in.readVInt(); - if (size > 0) { - tokens = new AnalyzeResponse.AnalyzeToken[size]; - for (int i = 0; i < size; i++) { - tokens[i] = AnalyzeResponse.AnalyzeToken.readAnalyzeToken(in); - } - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); @@ -353,12 +328,9 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { } } - public static class CharFilteredText implements Streamable, ToXContentObject { - private String name; - private String[] texts; - - CharFilteredText() { - } + public static class CharFilteredText implements Writeable, ToXContentObject { + private final String name; + private final String[] texts; public CharFilteredText(String name, String[] texts) { this.name = name; @@ -369,6 +341,11 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { } } + public CharFilteredText(StreamInput in) throws IOException { + name = in.readString(); + texts = in.readStringArray(); + } + public String getName() { return name; } @@ -398,18 +375,6 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { return PARSER.parse(parser, null); } - public static CharFilteredText readCharFilteredText(StreamInput in) throws IOException { - CharFilteredText text = new CharFilteredText(); - text.readFrom(in); - return text; - } - - 
@Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - texts = in.readStringArray(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 9538bd4b4d2..07f445b6fc7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -40,6 +40,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; @@ -97,7 +98,12 @@ public class TransportAnalyzeAction extends TransportSingleShardAction getResponseReader() { + return AnalyzeResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 6d7ad085dcd..8b0e69bd457 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -38,6 +38,7 @@ import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.shard.ShardId; @@ -118,8 +119,18 @@ public abstract class TransportSingleShardAction getResponseReader() { + return in -> { + Response response = newResponse(); + response.readFrom(in); + return response; + }; + } + protected abstract boolean resolveIndex(Request request); protected ClusterBlockException checkGlobalBlock(ClusterState state) { @@ -182,13 +193,12 @@ public abstract class TransportSingleShardAction reader = getResponseReader(); transportService.sendRequest(clusterService.localNode(), transportShardAction, internalRequest.request(), new TransportResponseHandler() { @Override public Response read(StreamInput in) throws IOException { - Response response = newResponse(); - response.readFrom(in); - return response; + return reader.read(in); } @Override @@ -251,14 +261,13 @@ public abstract class TransportSingleShardAction reader = getResponseReader(); transportService.sendRequest(node, transportShardAction, internalRequest.request(), new TransportResponseHandler() { @Override public Response read(StreamInput in) throws IOException { - Response response = newResponse(); - response.readFrom(in); - return response; + return reader.read(in); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java index 7f1b7fb41ba..a4cee7a4cde 
100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java @@ -20,12 +20,13 @@ package org.elasticsearch.action.admin.indices.analyze; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.ArrayList; @@ -37,7 +38,7 @@ import java.util.function.Predicate; import static org.hamcrest.Matchers.equalTo; -public class AnalyzeResponseTests extends AbstractStreamableXContentTestCase { +public class AnalyzeResponseTests extends AbstractSerializingTestCase { @Override protected Predicate getRandomFieldsExcludeFilter() { @@ -50,8 +51,8 @@ public class AnalyzeResponseTests extends AbstractStreamableXContentTestCase instanceReader() { + return AnalyzeResponse::new; } @Override @@ -61,21 +62,24 @@ public class AnalyzeResponseTests extends AbstractStreamableXContentTestCase Date: Thu, 9 May 2019 13:16:13 +0100 Subject: [PATCH 03/67] Mute ApiKeyIntegTests See https://github.com/elastic/elasticsearch/issues/41747 --- .../org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index f6849cae4c1..c8cea450379 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -315,6 +315,7 @@ public class ApiKeyIntegTests extends SecurityIntegTestCase { return internalCluster().client(nodeWithMostRecentRun); } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/41747") public void testExpiredApiKeysBehaviorWhenKeysExpired1WeekBeforeAnd1DayBefore() throws Exception { Client client = waitForExpiredApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader( Collections.singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, From b7c7ca8f0946c73a1226855d2e8953069f6387d2 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 9 May 2019 14:25:07 +0200 Subject: [PATCH 04/67] Fix IAE on cross_fields query introduced in 7.0.1 (#41938) If the max doc in the index is greater than the minimum total term frequency among the requested fields we need to adjust max doc to be equal to the min ttf. This was removed by mistake when fixing #41125. 
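To make the adjustment concrete, here is the arithmetic for the
testMinTTF case added below (10 documents; a "dense" field with three
occurrences of the term per document; a "sparse" field where the term
occurs once, in a single document):

    sumTTF(dense)  = 10 docs * 3 occurrences = 30
    sumTTF(sparse) = 1
    minSumTTF      = min(30, 1) = 1
    maxDoc         = 10 > minSumTTF, so maxDoc is clamped to (int) minSumTTF = 1

Without the clamp the blended statistics are internally inconsistent
(the adjusted term statistics can claim more documents than maxDoc
allows), which is what surfaced as the IllegalArgumentException.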
Closes #41934 --- .../lucene/queries/BlendedTermQuery.java | 4 ++- .../lucene/queries/BlendedTermQueryTests.java | 30 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index 1700979c32d..c696d476bbb 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -118,7 +118,9 @@ public abstract class BlendedTermQuery extends Query { // otherwise the statistics don't match minSumTTF = Math.min(minSumTTF, reader.getSumTotalTermFreq(terms[i].field())); } - + } + if (maxDoc > minSumTTF) { + maxDoc = (int)minSumTTF; } if (max == 0) { return; // we are done that term doesn't exist at all diff --git a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index ce33c247a33..9d05e119cbb 100644 --- a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -227,4 +227,34 @@ public class BlendedTermQueryTests extends ESTestCase { assertThat(extracted.size(), equalTo(terms.size())); assertThat(extracted, containsInAnyOrder(terms.toArray(new Term[0]))); } + + public void testMinTTF() throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))); + FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); + ft.freeze(); + + for (int i = 0; i < 10; i++) { + Document d = new Document(); + d.add(new TextField("id", Integer.toString(i), Field.Store.YES)); + d.add(new Field("dense", "foo foo foo", ft)); + if (i % 10 == 0) { + d.add(new Field("sparse", "foo", ft)); + } + w.addDocument(d); + } + w.commit(); + DirectoryReader reader = DirectoryReader.open(w); + IndexSearcher searcher = setSimilarity(newSearcher(reader)); + { + String[] fields = new String[]{"dense", "sparse"}; + Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "foo"), 0.1f); + TopDocs search = searcher.search(query, 10); + ScoreDoc[] scoreDocs = search.scoreDocs; + assertEquals(Integer.toString(0), reader.document(scoreDocs[0].doc).getField("id").stringValue()); + } + reader.close(); + w.close(); + dir.close(); + } } From 256588d7735fdba4cd94975deb186c6eb9c74387 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 9 May 2019 13:46:59 +0100 Subject: [PATCH 05/67] Mute IndexStatsIT#testFilterCacheStats See https://github.com/elastic/elasticsearch/issues/32506 --- .../test/java/org/elasticsearch/indices/stats/IndexStatsIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 5fb67a64d9d..a3697af50b0 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -1009,6 +1009,7 @@ public class IndexStatsIT extends ESIntegTestCase { } @TestLogging("_root:DEBUG") // this fails at a very low rate on CI: https://github.com/elastic/elasticsearch/issues/32506 + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/32506") public void testFilterCacheStats() throws Exception { Settings settings = 
Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build(); assertAcked(prepareCreate("index").setSettings(settings).get()); From 0b21fb0ee6ac99a52e4a33ce23cfd59db5c43676 Mon Sep 17 00:00:00 2001 From: Daniel Schneiter Date: Thu, 9 May 2019 15:08:31 +0200 Subject: [PATCH 06/67] Mentioned the name of the icu_analyzer --- docs/plugins/analysis-icu.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc index b6299139992..69d741fa79a 100644 --- a/docs/plugins/analysis-icu.asciidoc +++ b/docs/plugins/analysis-icu.asciidoc @@ -29,7 +29,7 @@ include::install_remove.asciidoc[] [[analysis-icu-analyzer]] ==== ICU Analyzer -Performs basic normalization, tokenization and character folding, using the +The `icu_analyzer` analyzer performs basic normalization, tokenization and character folding, using the `icu_normalizer` char filter, `icu_tokenizer` and `icu_normalizer` token filter The following parameters are accepted: From 7ee541546a7a7db88028b526355120f0c5153a80 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 9 May 2019 09:19:30 -0400 Subject: [PATCH 07/67] Account for Java 8 in JVM options parsing Java 8 presents the JVM options slightly differently when displaying via -XX:+PrintFlagsFinal. This commit adapts the JVM options parser for this possibility. Relates #42009 --- .../org/elasticsearch/tools/launchers/JvmErgonomics.java | 2 +- .../elasticsearch/tools/launchers/JvmErgonomicsTests.java | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java index 44f60ba2d9f..e0e6632aa24 100644 --- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java +++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java @@ -68,7 +68,7 @@ final class JvmErgonomics { } private static final Pattern OPTION = - Pattern.compile("^\\s*\\S+\\s+(?\\S+)\\s+:?=\\s+(?\\S+)?\\s+\\{[^}]+?\\}\\s+\\{[^}]+}"); + Pattern.compile("^\\s*\\S+\\s+(?\\S+)\\s+:?=\\s+(?\\S+)?\\s+\\{[^}]+?\\}(\\s+\\{[^}]+})?"); static Map> finalJvmOptions( final List userDefinedJvmOptions) throws InterruptedException, IOException { diff --git a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java index b5b6699f471..c24cbdb097b 100644 --- a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java +++ b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java @@ -26,6 +26,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -71,7 +72,9 @@ public class JvmErgonomicsTests extends LaunchersTestCase { fail("expected starting java to fail"); } catch (final RuntimeException e) { assertThat(e, hasToString(containsString(("starting java failed")))); - assertThat(e, hasToString(containsString(("Too small maximum heap")))); + assertThat( + e, + anyOf(hasToString(containsString("Too small initial heap")), 
hasToString(containsString("Too small maximum heap")))); } } From ea5019665a8847a26f03bae901dc4b7f57e80fa6 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Thu, 9 May 2019 09:51:12 -0400 Subject: [PATCH 08/67] [DOCS] Replace table with def list for ids query (#41865) --- docs/reference/query-dsl/ids-query.asciidoc | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/docs/reference/query-dsl/ids-query.asciidoc b/docs/reference/query-dsl/ids-query.asciidoc index 70554e1acbf..43de8cb7332 100644 --- a/docs/reference/query-dsl/ids-query.asciidoc +++ b/docs/reference/query-dsl/ids-query.asciidoc @@ -21,8 +21,5 @@ GET /_search [[ids-query-top-level-parameters]] ==== Top-level parameters for `ids` -[cols="v,v",options="header"] -|====== -|Parameter |Description -|`values` |An array of <>. -|====== \ No newline at end of file +`values`:: +An array of <>. \ No newline at end of file From 970a2254c3c623dc92fa2e19c6956606e4a128d4 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 9 May 2019 10:10:27 -0400 Subject: [PATCH 09/67] Limit max direct memory size to half of heap size (#42006) This commit adds an ergonomic choice ot the max direct memory size such that if it is not set, we default it to half of the heap size. --- .../tools/launchers/JvmErgonomics.java | 8 +++ .../tools/launchers/JvmErgonomicsTests.java | 57 ++++++++++++++++--- 2 files changed, 56 insertions(+), 9 deletions(-) diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java index e0e6632aa24..d18ac681d75 100644 --- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java +++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java @@ -64,6 +64,10 @@ final class JvmErgonomics { ergonomicChoices.add("-Dio.netty.allocator.type=pooled"); } } + final long maxDirectMemorySize = extractMaxDirectMemorySize(finalJvmOptions); + if (maxDirectMemorySize == 0) { + ergonomicChoices.add("-XX:MaxDirectMemorySize=" + heapSize / 2); + } return ergonomicChoices; } @@ -122,6 +126,10 @@ final class JvmErgonomics { return Long.parseLong(finalJvmOptions.get("MaxHeapSize").get()); } + static long extractMaxDirectMemorySize(final Map> finalJvmOptions) { + return Long.parseLong(finalJvmOptions.get("MaxDirectMemorySize").get()); + } + private static final Pattern SYSTEM_PROPERTY = Pattern.compile("^-D(?[\\w+].*?)=(?.*)$"); // package private for testing diff --git a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java index c24cbdb097b..9623e030d19 100644 --- a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java +++ b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java @@ -23,14 +23,17 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasItem; import static 
org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @@ -88,6 +91,19 @@ public class JvmErgonomicsTests extends LaunchersTestCase { } } + public void testMaxDirectMemorySizeUnset() throws InterruptedException, IOException { + assertThat( + JvmErgonomics.extractMaxDirectMemorySize(JvmErgonomics.finalJvmOptions(Collections.singletonList("-Xmx1g"))), + equalTo(0L)); + } + + public void testMaxDirectMemorySizeSet() throws InterruptedException, IOException { + assertThat( + JvmErgonomics.extractMaxDirectMemorySize(JvmErgonomics.finalJvmOptions( + Arrays.asList("-Xmx1g", "-XX:MaxDirectMemorySize=512m"))), + equalTo(512L << 20)); + } + public void testExtractSystemProperties() { Map expectedSystemProperties = new HashMap<>(); expectedSystemProperties.put("file.encoding", "UTF-8"); @@ -104,16 +120,39 @@ public class JvmErgonomicsTests extends LaunchersTestCase { assertTrue(parsedSystemProperties.isEmpty()); } - public void testLittleMemoryErgonomicChoices() throws InterruptedException, IOException { - String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G")); - List expectedChoices = Collections.singletonList("-Dio.netty.allocator.type=unpooled"); - assertEquals(expectedChoices, JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap))); + public void testPooledMemoryChoiceOnSmallHeap() throws InterruptedException, IOException { + final String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G")); + assertThat( + JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap)), + hasItem("-Dio.netty.allocator.type=unpooled")); } - public void testPlentyMemoryErgonomicChoices() throws InterruptedException, IOException { - String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G")); - List expectedChoices = Collections.singletonList("-Dio.netty.allocator.type=pooled"); - assertEquals(expectedChoices, JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap))); + public void testPooledMemoryChoiceOnNotSmallHeap() throws InterruptedException, IOException { + final String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G")); + assertThat( + JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap)), + hasItem("-Dio.netty.allocator.type=pooled")); + } + + public void testMaxDirectMemorySizeChoice() throws InterruptedException, IOException { + final Map heapMaxDirectMemorySize = Map.of( + "64M", Long.toString((64L << 20) / 2), + "512M", Long.toString((512L << 20) / 2), + "1024M", Long.toString((1024L << 20) / 2), + "1G", Long.toString((1L << 30) / 2), + "2048M", Long.toString((2048L << 20) / 2), + "2G", Long.toString((2L << 30) / 2), + "8G", Long.toString((8L << 30) / 2)); + final String heapSize = randomFrom(heapMaxDirectMemorySize.keySet().toArray(String[]::new)); + assertThat( + JvmErgonomics.choose(Arrays.asList("-Xms" + heapSize, "-Xmx" + heapSize)), + hasItem("-XX:MaxDirectMemorySize=" + heapMaxDirectMemorySize.get(heapSize))); + } + + public void testMaxDirectMemorySizeChoiceWhenSet() throws InterruptedException, IOException { + assertThat( + JvmErgonomics.choose(Arrays.asList("-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=1g")), + everyItem(not(startsWith("-XX:MaxDirectMemorySize=")))); } } From f3bcc4fc225045ee798b76946b385435845f9aa0 Mon Sep 17 00:00:00 2001 From: Jay Modi 
Date: Thu, 9 May 2019 08:02:43 -0600 Subject: [PATCH 10/67] Default seed address tests account for no IPv6 (#41971) This change makes the default seed address tests account for the lack of an IPv6 network. By default docker containers only run with IPv4 and these tests fail in a vanilla installation of elasticsearch-ci. To resolve this we only expect IPv6 seed addresses if IPv6 is available. Relates #41404 --- .../transport/TcpTransportTests.java | 55 +++++++++++-------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 80d183e499e..17106508ae7 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockPageCacheRecycler; @@ -97,54 +98,64 @@ public class TcpTransportTests extends ESTestCase { } public void testDefaultSeedAddressesWithDefaultPort() { - testDefaultSeedAddresses(Settings.EMPTY, containsInAnyOrder( - "[::1]:9300", "[::1]:9301", "[::1]:9302", "[::1]:9303", "[::1]:9304", "[::1]:9305", - "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304", "127.0.0.1:9305")); + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? + containsInAnyOrder( + "[::1]:9300", "[::1]:9301", "[::1]:9302", "[::1]:9303", "[::1]:9304", "[::1]:9305", + "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304", "127.0.0.1:9305") : + containsInAnyOrder( + "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304", "127.0.0.1:9305"); + testDefaultSeedAddresses(Settings.EMPTY, seedAddressMatcher); } public void testDefaultSeedAddressesWithNonstandardGlobalPortRange() { - testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9500-9600").build(), containsInAnyOrder( - "[::1]:9500", "[::1]:9501", "[::1]:9502", "[::1]:9503", "[::1]:9504", "[::1]:9505", - "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505")); + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? + containsInAnyOrder( + "[::1]:9500", "[::1]:9501", "[::1]:9502", "[::1]:9503", "[::1]:9504", "[::1]:9505", + "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505") : + containsInAnyOrder( + "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505"); + testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9500-9600").build(), seedAddressMatcher); } public void testDefaultSeedAddressesWithSmallGlobalPortRange() { - testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9300-9302").build(), containsInAnyOrder( - "[::1]:9300", "[::1]:9301", "[::1]:9302", - "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302")); + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? 
+ containsInAnyOrder("[::1]:9300", "[::1]:9301", "[::1]:9302", "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302") : + containsInAnyOrder("127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302"); + testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9300-9302").build(), seedAddressMatcher); } public void testDefaultSeedAddressesWithNonstandardProfilePortRange() { + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? + containsInAnyOrder("[::1]:9500", "[::1]:9501", "[::1]:9502", "[::1]:9503", "[::1]:9504", "[::1]:9505", + "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505") : + containsInAnyOrder("127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505"); testDefaultSeedAddresses(Settings.builder() .put(TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(TransportSettings.DEFAULT_PROFILE).getKey(), "9500-9600") - .build(), - containsInAnyOrder( - "[::1]:9500", "[::1]:9501", "[::1]:9502", "[::1]:9503", "[::1]:9504", "[::1]:9505", - "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505")); + .build(), seedAddressMatcher); } public void testDefaultSeedAddressesWithSmallProfilePortRange() { + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? + containsInAnyOrder("[::1]:9300", "[::1]:9301", "[::1]:9302", "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302") : + containsInAnyOrder("127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302"); testDefaultSeedAddresses(Settings.builder() .put(TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(TransportSettings.DEFAULT_PROFILE).getKey(), "9300-9302") - .build(), - containsInAnyOrder( - "[::1]:9300", "[::1]:9301", "[::1]:9302", - "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302")); + .build(), seedAddressMatcher); } public void testDefaultSeedAddressesPrefersProfileSettingToGlobalSetting() { + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? + containsInAnyOrder("[::1]:9300", "[::1]:9301", "[::1]:9302", "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302") : + containsInAnyOrder("127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302"); testDefaultSeedAddresses(Settings.builder() .put(TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(TransportSettings.DEFAULT_PROFILE).getKey(), "9300-9302") .put(TransportSettings.PORT.getKey(), "9500-9600") - .build(), - containsInAnyOrder( - "[::1]:9300", "[::1]:9301", "[::1]:9302", - "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302")); + .build(), seedAddressMatcher); } public void testDefaultSeedAddressesWithNonstandardSinglePort() { testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9500").build(), - containsInAnyOrder("[::1]:9500", "127.0.0.1:9500")); + NetworkUtils.SUPPORTS_V6 ? containsInAnyOrder("[::1]:9500", "127.0.0.1:9500") : containsInAnyOrder("127.0.0.1:9500")); } private void testDefaultSeedAddresses(final Settings settings, Matcher> seedAddressesMatcher) { From 598e0962ed5fce0034a5261a823d446d57e4d9da Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 9 May 2019 10:23:36 -0400 Subject: [PATCH 11/67] Fix compilation in JVMErgonomicsTests This issue arose after a cherry-pick from the wrong branch. Sorry. This commit addresses the issue. 
--- .../tools/launchers/JvmErgonomicsTests.java | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java index 9623e030d19..434c6e3bfa3 100644 --- a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java +++ b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java @@ -135,15 +135,15 @@ public class JvmErgonomicsTests extends LaunchersTestCase { } public void testMaxDirectMemorySizeChoice() throws InterruptedException, IOException { - final Map heapMaxDirectMemorySize = Map.of( - "64M", Long.toString((64L << 20) / 2), - "512M", Long.toString((512L << 20) / 2), - "1024M", Long.toString((1024L << 20) / 2), - "1G", Long.toString((1L << 30) / 2), - "2048M", Long.toString((2048L << 20) / 2), - "2G", Long.toString((2L << 30) / 2), - "8G", Long.toString((8L << 30) / 2)); - final String heapSize = randomFrom(heapMaxDirectMemorySize.keySet().toArray(String[]::new)); + final Map heapMaxDirectMemorySize = new HashMap<>(); + heapMaxDirectMemorySize.put("64M", Long.toString((64L << 20) / 2)); + heapMaxDirectMemorySize.put("512M", Long.toString((512L << 20) / 2)); + heapMaxDirectMemorySize.put("1024M", Long.toString((1024L << 20) / 2)); + heapMaxDirectMemorySize.put("1G", Long.toString((1L << 30) / 2)); + heapMaxDirectMemorySize.put("2048M", Long.toString((2048L << 20) / 2)); + heapMaxDirectMemorySize.put("2G", Long.toString((2L << 30) / 2)); + heapMaxDirectMemorySize.put("8G", Long.toString((8L << 30) / 2)); + final String heapSize = randomFrom(heapMaxDirectMemorySize.keySet().toArray(new String[0])); assertThat( JvmErgonomics.choose(Arrays.asList("-Xms" + heapSize, "-Xmx" + heapSize)), hasItem("-XX:MaxDirectMemorySize=" + heapMaxDirectMemorySize.get(heapSize))); From 9284a70ec8905225731712b76589189ab0d08ce1 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 9 May 2019 07:48:23 -0700 Subject: [PATCH 12/67] [DOCS] Updates security configuration overview (#41982) --- .../docs/en/security/configuring-es.asciidoc | 55 ++++++++++--------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index b34e6e0c0e9..fdc49ef21e2 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -11,38 +11,31 @@ such as encrypting communications, role-based access control, IP filtering, and auditing. For more information, see {stack-ov}/elasticsearch-security.html[Securing the {stack}]. -To use {es} {security-features}: - -. Verify that you are using a license that includes the {security-features}. +. Verify that you are using a license that includes the specific +{security-features} you want. + -- -If you want to try all of the platinum features, you can start a 30-day trial. -At the end of the trial period, you can purchase a subscription to keep using -the full functionality. For more information, see -https://www.elastic.co/subscriptions and -{stack-ov}/license-management.html[License Management]. +For more information, see https://www.elastic.co/subscriptions and +{stack-ov}/license-management.html[License management]. -- . Verify that the `xpack.security.enabled` setting is `true` on each node in -your cluster. 
If you are using a trial license, the default value is `false`. -For more information, see {ref}/security-settings.html[Security Settings in {es}]. +your cluster. If you are using basic or trial licenses, the default value is `false`. +For more information, see {ref}/security-settings.html[Security settings in {es}]. . If you plan to run {es} in a Federal Information Processing Standard (FIPS) 140-2 enabled JVM, see <>. -. Configure Transport Layer Security (TLS/SSL) for internode-communication. +. <>. + -- NOTE: This requirement applies to clusters with more than one node and to clusters with a single node that listens on an external interface. Single-node clusters that use a loopback interface do not have this requirement. For more information, see -{stack-ov}/encrypting-communications.html[Encrypting Communications]. +{stack-ov}/encrypting-communications.html[Encrypting communications]. -- -.. <>. - -.. <>. . If it is not already running, start {es}. @@ -72,14 +65,20 @@ user API. -- -. Choose which types of realms you want to use to authenticate users. -** <>. -** <>. -** <>. -** <>. -** <>. -** <>. -** <>. +. Choose which types of realms you want to use to authenticate users. ++ +-- +TIP: The types of authentication realms that you can enable varies according to +your subscription. For more information, see https://www.elastic.co/subscriptions. + +-- +** <> +** <> +** <> +** <> +** <> +** <> +** <> . Set up roles and users to control access to {es}. + @@ -114,10 +113,13 @@ curl -XPOST -u elastic 'localhost:9200/_security/user/johndoe' -H "Content-Type: // NOTCONSOLE -- -. [[enable-auditing]]Enable auditing to keep track of attempted and successful interactions with - your {es} cluster: +. [[enable-auditing]](Optional) Enable auditing to keep track of attempted and +successful interactions with your {es} cluster: + -- +TIP: Audit logging is available with specific subscriptions. For more +information, see https://www.elastic.co/subscriptions. + .. Add the following setting to `elasticsearch.yml` on all nodes in your cluster: + [source,yaml] @@ -134,6 +136,9 @@ Events are logged to a dedicated `_audit.json` file in `ES_HOME/logs`, on each cluster node. -- +To walk through the configuration of {security-features} in {es}, {kib}, {ls}, and {metricbeat}, see +{stack-ov}/security-getting-started.html[Getting started with security]. + :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc include::{es-repo-dir}/security/securing-communications/securing-elasticsearch.asciidoc[] From 2998c107fbfd06f1ffe563e4357438045ff19c02 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Thu, 9 May 2019 09:41:23 -0600 Subject: [PATCH 13/67] Fix node close stopwatch usage (#41918) The close method in Node uses a StopWatch to time to closing of various services. However, the call to log the timing was made before any of the services had been closed and therefore no timing would be printed out. This change moves the timing log call to be a closeable that is the last item closed. 
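As a minimal standalone sketch of the pattern (class and variable names
invented for this illustration; the real change is in Node#close in the
diff below):

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    public class DeferredCloseLogDemo {
        public static void main(String[] args) throws IOException {
            List<Closeable> toClose = new ArrayList<>();
            long start = System.nanoTime();
            toClose.add(() -> System.out.println("service closed"));
            // Before the fix, the equivalent of the timing log ran eagerly,
            // before anything in toClose had been closed. Registering it as
            // the last Closeable defers it until every service has closed.
            toClose.add(() -> System.out.printf("close took %d ns%n", System.nanoTime() - start));
            for (Closeable closeable : toClose) {
                closeable.close();
            }
        }
    }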
--- server/src/main/java/org/elasticsearch/node/Node.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 42f80dbd87c..b79cad68dfa 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -839,13 +839,15 @@ public class Node implements Closeable { // Don't call shutdownNow here, it might break ongoing operations on Lucene indices. // See https://issues.apache.org/jira/browse/LUCENE-7248. We call shutdownNow in // awaitClose if the node doesn't finish closing within the specified time. - toClose.add(() -> stopWatch.stop()); + toClose.add(() -> stopWatch.stop().start("node_environment")); toClose.add(injector.getInstance(NodeEnvironment.class)); + toClose.add(() -> stopWatch.stop().start("page_cache_recycler")); toClose.add(injector.getInstance(PageCacheRecycler.class)); + toClose.add(stopWatch::stop); if (logger.isTraceEnabled()) { - logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint()); + toClose.add(() -> logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint())); } IOUtils.close(toClose); logger.info("closed"); From 732ef15f0dbd8db572cdff75a54cfed1b0e09d9e Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Thu, 9 May 2019 13:10:19 -0400 Subject: [PATCH 14/67] [DOCS] Adds placeholder for 7.1.0 release notes (#42024) --- docs/reference/release-notes.asciidoc | 2 + docs/reference/release-notes/7.1.0.asciidoc | 52 +++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 docs/reference/release-notes/7.1.0.asciidoc diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index f3d62e889dc..ab5dc60b019 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,6 +6,7 @@ This section summarizes the changes in each release. +* <> * <> * <> * <> @@ -15,6 +16,7 @@ This section summarizes the changes in each release. -- +include::release-notes/7.1.0.asciidoc[] include::release-notes/7.0.0.asciidoc[] include::release-notes/7.0.0-rc2.asciidoc[] include::release-notes/7.0.0-rc1.asciidoc[] diff --git a/docs/reference/release-notes/7.1.0.asciidoc b/docs/reference/release-notes/7.1.0.asciidoc new file mode 100644 index 00000000000..8ab37f875c2 --- /dev/null +++ b/docs/reference/release-notes/7.1.0.asciidoc @@ -0,0 +1,52 @@ +//// +// To add a release, copy and paste the following text, uncomment the relevant +// sections, and add a link to the new section in the list of releases in +// ../release-notes.asciidoc. Note that release subheads must be floated and +// sections cannot be empty. +// TEMPLATE + +// [[release-notes-n.n.n]] +// == {es} version n.n.n + +// coming[n.n.n] + +// Also see <>. + +// [float] +// [[breaking-n.n.n]] +// === Breaking Changes + +// [float] +// [[breaking-java-n.n.n]] +// === Breaking Java Changes + +// [float] +// [[deprecation-n.n.n]] +// === Deprecations + +// [float] +// [[feature-n.n.n]] +// === New Features + +// [float] +// [[enhancement-n.n.n]] +// === Enhancements + +// [float] +// [[bug-n.n.n]] +// === Bug Fixes + +// [float] +// [[regression-n.n.n]] +// === Regressions + +// [float] +// === Known Issues +//// + +[[release-notes-7.1.0]] +== {es} version 7.1.0 + +Also see <>. 
+ +coming[7.1.0] \ No newline at end of file From bf5a40c75467cb87eeebad0715bdacf79f98b1e2 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Thu, 9 May 2019 14:28:50 -0500 Subject: [PATCH 15/67] [ML] relax set upgrade mode test to match what is guaranteed (#41958) (#41979) * [ML] relax set upgrade mode test to match what is guaranteed * removing unused import --- .../xpack/ml/integration/SetUpgradeModeIT.java | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java index 57c9245e2c5..f97c27e4ccc 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java @@ -25,6 +25,7 @@ import org.junit.After; import java.util.Collections; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.core.ml.MlTasks.AWAITING_UPGRADE; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeed; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createScheduledJob; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.getDataCounts; @@ -33,7 +34,6 @@ import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.indexDocs; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.isEmptyString; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -88,12 +88,12 @@ public class SetUpgradeModeIT extends MlNativeAutodetectIntegTestCase { GetJobsStatsAction.Response.JobStats jobStats = getJobStats(jobId).get(0); assertThat(jobStats.getState(), equalTo(JobState.OPENED)); - assertThat(jobStats.getAssignmentExplanation(), equalTo(MlTasks.AWAITING_UPGRADE.getExplanation())); + assertThat(jobStats.getAssignmentExplanation(), equalTo(AWAITING_UPGRADE.getExplanation())); assertThat(jobStats.getNode(), is(nullValue())); GetDatafeedsStatsAction.Response.DatafeedStats datafeedStats = getDatafeedStats(datafeedId); assertThat(datafeedStats.getDatafeedState(), equalTo(DatafeedState.STARTED)); - assertThat(datafeedStats.getAssignmentExplanation(), equalTo(MlTasks.AWAITING_UPGRADE.getExplanation())); + assertThat(datafeedStats.getAssignmentExplanation(), equalTo(AWAITING_UPGRADE.getExplanation())); assertThat(datafeedStats.getNode(), is(nullValue())); Job.Builder job = createScheduledJob("job-should-not-open"); @@ -126,13 +126,11 @@ public class SetUpgradeModeIT extends MlNativeAutodetectIntegTestCase { jobStats = getJobStats(jobId).get(0); assertThat(jobStats.getState(), equalTo(JobState.OPENED)); - assertThat(jobStats.getAssignmentExplanation(), isEmptyString()); - assertThat(jobStats.getNode(), is(not(nullValue()))); + assertThat(jobStats.getAssignmentExplanation(), not(equalTo(AWAITING_UPGRADE.getExplanation()))); datafeedStats = getDatafeedStats(datafeedId); assertThat(datafeedStats.getDatafeedState(), equalTo(DatafeedState.STARTED)); - assertThat(datafeedStats.getAssignmentExplanation(), isEmptyString()); - assertThat(datafeedStats.getNode(), is(not(nullValue()))); + assertThat(datafeedStats.getAssignmentExplanation(), 
not(equalTo(AWAITING_UPGRADE.getExplanation()))); } private void startRealtime(String jobId) throws Exception { From b23b06ddedd72e9b8636be2f4bd430ea647f4bf6 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Thu, 9 May 2019 14:29:10 -0500 Subject: [PATCH 16/67] [ML] verify that there are no duplicate leaf fields in aggs (#41895) (#42025) * [ML] verify that there are no duplicate leaf fields in aggs * addressing pr comments * addressing PR comments * optmizing duplication check --- .../action/PutDataFrameTransformAction.java | 8 +- .../transforms/pivot/PivotConfig.java | 62 ++++++++++++++++ .../transforms/pivot/PivotConfigTests.java | 73 ++++++++++++++++++- .../test/data_frame/transforms_crud.yml | 32 ++++++++ 4 files changed, 172 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java index 0f6cc63f988..059bad3494c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java @@ -20,6 +20,8 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfi import java.io.IOException; import java.util.Objects; +import static org.elasticsearch.action.ValidateActions.addValidationError; + public class PutDataFrameTransformAction extends Action { public static final PutDataFrameTransformAction INSTANCE = new PutDataFrameTransformAction(); @@ -53,7 +55,11 @@ public class PutDataFrameTransformAction extends Action { @Override public ActionRequestValidationException validate() { - return null; + ActionRequestValidationException validationException = null; + for(String failure : config.getPivotConfig().aggFieldValidation()) { + validationException = addValidationError(failure, validationException); + } + return validationException; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java index c1c894e2971..79a0a7fc1bf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java @@ -13,11 +13,17 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Map.Entry; import java.util.Objects; @@ -141,7 +147,63 @@ public class PivotConfig implements Writeable, ToXContentObject { return groups.isValid() && aggregationConfig.isValid(); } + public List 
aggFieldValidation() { + if ((aggregationConfig.isValid() && groups.isValid()) == false) { + return Collections.emptyList(); + } + List usedNames = new ArrayList<>(); + // TODO this will need to change once we allow multi-bucket aggs + field merging + aggregationConfig.getAggregatorFactories().forEach(agg -> addAggNames(agg, usedNames)); + aggregationConfig.getPipelineAggregatorFactories().forEach(agg -> addAggNames(agg, usedNames)); + usedNames.addAll(groups.getGroups().keySet()); + return aggFieldValidation(usedNames); + } + public static PivotConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException { return lenient ? LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null); } + + /** + * Does the following checks: + * + * - determines if there are any full duplicate names between the aggregation names and the group by names. + * - finds if there are conflicting name paths that could cause a failure later when the config is started. + * + * Examples showing conflicting field name paths: + * + * aggName1: foo.bar.baz + * aggName2: foo.bar + * + * This should fail as aggName1 will cause foo.bar to be an object, causing a conflict with the use of foo.bar in aggName2. + * @param usedNames The aggregation and group_by names + * @return List of validation failure messages + */ + static List aggFieldValidation(List usedNames) { + if (usedNames == null || usedNames.isEmpty()) { + return Collections.emptyList(); + } + List validationFailures = new ArrayList<>(); + + usedNames.sort(String::compareTo); + for (int i = 0; i < usedNames.size() - 1; i++) { + if (usedNames.get(i+1).startsWith(usedNames.get(i) + ".")) { + validationFailures.add("field [" + usedNames.get(i) + "] cannot be both an object and a field"); + } + if (usedNames.get(i+1).equals(usedNames.get(i))) { + validationFailures.add("duplicate field [" + usedNames.get(i) + "] detected"); + } + } + return validationFailures; + } + + + private static void addAggNames(AggregationBuilder aggregationBuilder, Collection names) { + names.add(aggregationBuilder.getName()); + aggregationBuilder.getSubAggregations().forEach(agg -> addAggNames(agg, names)); + aggregationBuilder.getPipelineAggregations().forEach(agg -> addAggNames(agg, names)); + } + + private static void addAggNames(PipelineAggregationBuilder pipelineAggregationBuilder, Collection names) { + names.add(pipelineAggregationBuilder.getName()); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java index 1586ea540f4..342e007f212 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.XContentParser; @@ -13,6 +14,12 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.dataframe.transforms.AbstractSerializingDataFrameTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static org.hamcrest.CoreMatchers.is; +import static 
org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; public class PivotConfigTests extends AbstractSerializingDataFrameTestCase { @@ -103,7 +110,7 @@ public class PivotConfigTests extends AbstractSerializingDataFrameTestCase createPivotConfigFromString(pivot, false)); } - public void testDoubleAggs() throws IOException { + public void testDoubleAggs() { String pivot = "{" + " \"group_by\": {" + " \"id\": {" @@ -136,6 +143,68 @@ public class PivotConfigTests extends AbstractSerializingDataFrameTestCase createPivotConfigFromString(pivot, false)); } + public void testValidAggNames() throws IOException { + String pivotAggs = "{" + + " \"group_by\": {" + + " \"user.id.field\": {" + + " \"terms\": {" + + " \"field\": \"id\"" + + "} } }," + + " \"aggs\": {" + + " \"avg.field.value\": {" + + " \"avg\": {" + + " \"field\": \"points\"" + + "} } } }"; + PivotConfig pivotConfig = createPivotConfigFromString(pivotAggs, true); + assertTrue(pivotConfig.isValid()); + List fieldValidation = pivotConfig.aggFieldValidation(); + assertTrue(fieldValidation.isEmpty()); + } + + public void testAggNameValidationsWithoutIssues() { + String prefix = randomAlphaOfLength(10) + "1"; + String prefix2 = randomAlphaOfLength(10) + "2"; + String nestedField1 = randomAlphaOfLength(10) + "3"; + String nestedField2 = randomAlphaOfLength(10) + "4"; + + assertThat(PivotConfig.aggFieldValidation(Arrays.asList(prefix + nestedField1 + nestedField2, + prefix + nestedField1, + prefix, + prefix2)), is(empty())); + + assertThat(PivotConfig.aggFieldValidation( + Arrays.asList( + dotJoin(prefix, nestedField1, nestedField2), + dotJoin(nestedField1, nestedField2), + nestedField2, + prefix2)), is(empty())); + } + + public void testAggNameValidationsWithDuplicatesAndNestingIssues() { + String prefix = randomAlphaOfLength(10) + "1"; + String prefix2 = randomAlphaOfLength(10) + "2"; + String nestedField1 = randomAlphaOfLength(10) + "3"; + String nestedField2 = randomAlphaOfLength(10) + "4"; + + List failures = PivotConfig.aggFieldValidation( + Arrays.asList( + dotJoin(prefix, nestedField1, nestedField2), + dotJoin(prefix, nestedField2), + dotJoin(prefix, nestedField1), + dotJoin(prefix2, nestedField1), + dotJoin(prefix2, nestedField1), + prefix2)); + + assertThat(failures, + containsInAnyOrder("duplicate field [" + dotJoin(prefix2, nestedField1) + "] detected", + "field [" + prefix2 + "] cannot be both an object and a field", + "field [" + dotJoin(prefix, nestedField1) + "] cannot be both an object and a field")); + } + + private static String dotJoin(String... 
fields) { + return Strings.arrayToDelimitedString(fields, "."); + } + private PivotConfig createPivotConfigFromString(String json, boolean lenient) throws IOException { final XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml index fa608cefd1e..40af091a91b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml @@ -302,3 +302,35 @@ setup: "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } +--- +"Test creation failures due to duplicate and conflicting field names": + - do: + catch: /duplicate field \[airline\] detected/ + data_frame.put_data_frame_transform: + transform_id: "duplicate-field-transform" + body: > + { + "source": { + "index": "source-index" + }, + "dest": { "index": "dest-index" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"airline": {"avg": {"field": "responsetime"}}} + } + } + - do: + catch: /field \[airline\] cannot be both an object and a field/ + data_frame.put_data_frame_transform: + transform_id: "duplicate-field-transform" + body: > + { + "source": { + "index": "source-index" + }, + "dest": { "index": "dest-index" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"airline.responsetime": {"avg": {"field": "responsetime"}}} + } + } From 99a50ac3b7cd0072c4b6e216286e02e1b670b109 Mon Sep 17 00:00:00 2001 From: Christian Mesh Date: Thu, 9 May 2019 18:14:23 -0400 Subject: [PATCH 17/67] Add painless string split function (splitOnToken) (#39772) Adds two String split functions to Painless that can be used without enabling regexes. 
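Before the diff, a note on semantics (an editorial illustration, not part of the original commit): in Painless the call reads, for example, `'a,,b,,c'.splitOnToken(',', 2)`, and the patch's own tests pin the behavior to a literal, non-regex split. That is equivalent to java.util.regex with a quoted pattern, as this standalone sketch shows:

    import java.util.Arrays;
    import java.util.regex.Pattern;

    public class SplitOnTokenSemantics {
        public static void main(String[] args) {
            String input = "a,,b,,c";
            // Unlimited split on the literal token "," (quoted so it is not treated as a regex).
            System.out.println(Arrays.toString(input.split(Pattern.quote(","), -1)));
            // prints: [a, , b, , c]
            // With a limit of 2 the result has at most two elements; the remainder stays intact.
            System.out.println(Arrays.toString(input.split(Pattern.quote(","), 2)));
            // prints: [a, ,b,,c]
        }
    }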
--- .../packages.asciidoc | 2 + .../painless/api/Augmentation.java | 49 +++++++++++++++++++ .../elasticsearch/painless/spi/java.lang.txt | 2 + .../painless/AugmentationTests.java | 40 +++++++++++++++ 4 files changed, 93 insertions(+) diff --git a/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc index f6921410512..75ad21ddc93 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc @@ -1253,6 +1253,8 @@ See the <> for a high-level overview * String {java11-javadoc}/java.base/java/lang/String.html#replace(java.lang.CharSequence,java.lang.CharSequence)[replace](CharSequence, CharSequence) * String replaceAll(Pattern, Function) * String replaceFirst(Pattern, Function) +* String[] splitOnToken(String) +* String[] splitOnToken(String, int) * boolean {java11-javadoc}/java.base/java/lang/String.html#startsWith(java.lang.String)[startsWith](String) * boolean {java11-javadoc}/java.base/java/lang/String.html#startsWith(java.lang.String,int)[startsWith](String, int) * CharSequence {java11-javadoc}/java.base/java/lang/CharSequence.html#subSequence(int,int)[subSequence](int, int) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java index 0b751b7d2f7..bbbbc3dfc37 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java @@ -503,4 +503,53 @@ public class Augmentation { public static String decodeBase64(String receiver) { return new String(Base64.getDecoder().decode(receiver.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8); } + + /** + * Split 'receiver' by 'token' as many times as possible. + */ + public static String[] splitOnToken(String receiver, String token) { + return splitOnToken(receiver, token, -1); + } + + /** + * Split 'receiver' by 'token' up to 'limit' times. Any limit less than 1 is ignored.
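+ * For example (an added illustration): splitOnToken("a,,b,,c", ",", 2) returns
+ * ["a", ",b,,c"], matching String#split with a quoted (literal) pattern and the same limit.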
+ */ + public static String[] splitOnToken(String receiver, String token, int limit) { + // Check if it's even possible to perform a split + if (receiver == null || receiver.length() == 0 || token == null || token.length() == 0 || receiver.length() < token.length()) { + return new String[] { receiver }; + } + + // List of string segments we have found + ArrayList result = new ArrayList(); + + // Keep track of where we are in the string + // indexOf(tok, startPos) is faster than creating a new search context every loop with substring(start, end) + int pos = 0; + + // Loop until we hit the limit, or forever if we are passed in less than one (signifying no limit) + // If Integer.MIN_VALUE is passed in, it will still continue to loop down to 1 from MAX_VALUE + // This edge case should be fine as we are limited by receiver length (Integer.MAX_VALUE) even if we split at every char + for(;limit != 1; limit--) { + + // Find the next occurrence of token after current pos + int idx = receiver.indexOf(token, pos); + + // Reached the end of the string without another match + if (idx == -1) { + break; + } + + // Add the found segment to the result list + result.add(receiver.substring(pos, idx)); + + // Move our search position to the next possible location + pos = idx + token.length(); + } + // Add the remaining string to the result list + result.add(receiver.substring(pos)); + + // O(N) or faster depending on implementation + return result.toArray(new String[0]); + } } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt index ef2d462127f..63ed6d41c67 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt @@ -758,6 +758,8 @@ class java.lang.String { String copyValueOf(char[],int,int) String org.elasticsearch.painless.api.Augmentation decodeBase64() String org.elasticsearch.painless.api.Augmentation encodeBase64() + String[] org.elasticsearch.painless.api.Augmentation splitOnToken(String) + String[] org.elasticsearch.painless.api.Augmentation splitOnToken(String, int) boolean endsWith(String) boolean equalsIgnoreCase(String) String format(Locale,String,def[]) diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java index a0d1c5a5891..70fbb733e2f 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java @@ -23,6 +23,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.regex.Pattern; public class AugmentationTests extends ScriptTestCase { @@ -199,4 +200,43 @@ public class AugmentationTests extends ScriptTestCase { assertEquals(8, exec("def ft = new org.elasticsearch.painless.FeatureTestObject();" + " ft.setX(3); ft.setY(2); return ft.addToTotal(3)")); } + + private static class SplitCase { + final String input; + final String token; + final int count; + + SplitCase(String input, String token, int count) { + this.input = input; + this.token = token; + this.count = count; + } + SplitCase(String input, String token) { + this(input, token, -1); + } + } + public void testString_SplitOnToken() { + SplitCase[] cases =
new SplitCase[] { + new SplitCase("", ""), + new SplitCase("a,b,c", ","), + new SplitCase("a,b,c", "|"), + new SplitCase("a,,b,,c", ","), + new SplitCase("a,,b,,c", ",", 1), + new SplitCase("a,,b,,c", ",", 3), + new SplitCase("a,,b,,c", ",", 300), + new SplitCase("a,b,c", "a,b,c,d"), + new SplitCase("aaaaaaa", "a"), + new SplitCase("aaaaaaa", "a", 2), + new SplitCase("1.1.1.1.111", "1"), + new SplitCase("1.1.1.1.111", "."), + new SplitCase("1\n1.1.\r\n1\r\n111", "\r\n"), + }; + for (SplitCase split : cases) { + //System.out.println(String.format("Splitting '%s' by '%s' %d times", split.input, split.token, split.count)); + assertArrayEquals( + split.input.split(Pattern.quote(split.token), split.count), + (String[])exec("return \""+split.input+"\".splitOnToken(\""+split.token+"\", "+split.count+");") + ); + } + } } From 564019763282488dfdcb4ef579c1b8b9494c1e11 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Thu, 9 May 2019 22:08:31 -0700 Subject: [PATCH 18/67] Refactor TransportSingleShardAction to serialize Writeable responses (#41985) (#42040) Previously, TransportSingleShardAction required constructing a new empty response object. This response object's Streamable readFrom was used. As part of the migration to Writeable, the interface here was updated to leverage Writeable.Reader. relates to #34389. --- .../action/PainlessExecuteAction.java | 16 +++--- .../action/PainlessExecuteResponseTests.java | 50 +++++++++++++++++-- .../indices/analyze/AnalyzeResponse.java | 5 +- .../analyze/TransportAnalyzeAction.java | 5 -- .../mapping/get/GetFieldMappingsAction.java | 8 ++- .../mapping/get/GetFieldMappingsResponse.java | 44 +++++++++------- .../TransportGetFieldMappingsIndexAction.java | 5 +- .../action/explain/ExplainAction.java | 8 ++- .../action/explain/ExplainResponse.java | 27 ++++++---- .../explain/TransportExplainAction.java | 5 +- ...TransportFieldCapabilitiesIndexAction.java | 5 +- .../elasticsearch/action/get/GetAction.java | 8 ++- .../elasticsearch/action/get/GetResponse.java | 7 +-- .../action/get/MultiGetItemResponse.java | 5 +- .../action/get/MultiGetShardResponse.java | 50 ++++++++++--------- .../action/get/TransportGetAction.java | 5 +- .../get/TransportShardMultiGetAction.java | 5 +- .../shard/TransportSingleShardAction.java | 11 +--- .../MultiTermVectorsItemResponse.java | 5 +- .../MultiTermVectorsShardResponse.java | 50 ++++++++++--------- .../action/termvectors/TermVectorsAction.java | 8 ++- .../termvectors/TermVectorsResponse.java | 26 ++++++---- .../TransportShardMultiTermsVectorAction.java | 5 +- .../TransportTermVectorsAction.java | 5 +- .../index/seqno/RetentionLeaseActions.java | 15 +++++- .../get/GetFieldMappingsResponseTests.java | 16 +++--- .../action/explain/ExplainResponseTests.java | 10 ++-- .../termvectors/TermVectorsUnitTests.java | 6 +-- .../xpack/ccr/action/ShardChangesAction.java | 34 ++++++++----- .../PutCcrRestoreSessionAction.java | 4 +- .../ccr/action/ShardChangesResponseTests.java | 10 ++-- 31 files changed, 280 insertions(+), 183 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index e8d93b8ef77..eda62f7e5eb 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -103,7 +103,7 @@ public class PainlessExecuteAction extends Action 
implements ToXContentObject { @@ -388,20 +388,22 @@ public class PainlessExecuteAction extends Action getResponseReader() { + return Response::new; } @Override diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java index c75497bd630..ed75caff76b 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java @@ -18,17 +18,57 @@ */ package org.elasticsearch.painless.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; -public class PainlessExecuteResponseTests extends AbstractStreamableTestCase { +import java.io.IOException; + +public class PainlessExecuteResponseTests extends AbstractSerializingTestCase { @Override - protected PainlessExecuteAction.Response createBlankInstance() { - return new PainlessExecuteAction.Response(); + protected Writeable.Reader instanceReader() { + return PainlessExecuteAction.Response::new; } @Override protected PainlessExecuteAction.Response createTestInstance() { - return new PainlessExecuteAction.Response(randomAlphaOfLength(10)); + Object result; + switch (randomIntBetween(0, 2)) { + case 0: + result = randomAlphaOfLength(10); + break; + case 1: + result = randomBoolean(); + break; + case 2: + result = randomDoubleBetween(-10, 10, true); + break; + default: + throw new IllegalStateException("invalid branch"); + } + return new PainlessExecuteAction.Response(result); + } + + @Override + protected PainlessExecuteAction.Response doParseInstance(XContentParser parser) throws IOException { + parser.nextToken(); // START-OBJECT + parser.nextToken(); // FIELD-NAME + XContentParser.Token token = parser.nextToken(); // result value + Object result; + switch (token) { + case VALUE_STRING: + result = parser.text(); + break; + case VALUE_BOOLEAN: + result = parser.booleanValue(); + break; + case VALUE_NUMBER: + result = parser.doubleValue(); + break; + default: + throw new IOException("invalid response"); + } + return new PainlessExecuteAction.Response(result); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java index 945c2128bab..7e6d525cefb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java @@ -71,7 +71,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable attributes) { this.term = term; this.position = position; @@ -82,7 +82,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable tokens; public AnalyzeResponse(List tokens, DetailAnalyzeResponse detail) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 07f445b6fc7..62d8c0e91da 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -96,11 +96,6 @@ public class TransportAnalyzeAction extends TransportSingleShardAction getResponseReader() { return AnalyzeResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java index 5aa19652b67..d372d8cf93f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.mapping.get; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class GetFieldMappingsAction extends Action { @@ -32,6 +33,11 @@ public class GetFieldMappingsAction extends Action { @Override public GetFieldMappingsResponse newResponse() { - return new GetFieldMappingsResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return GetFieldMappingsResponse::new; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index d3200bc1e1d..e3be9e68342 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -92,9 +92,33 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte this.mappings = mappings; } + GetFieldMappingsResponse() { } + GetFieldMappingsResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + Map>> indexMapBuilder = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String index = in.readString(); + int typesSize = in.readVInt(); + Map> typeMapBuilder = new HashMap<>(typesSize); + for (int j = 0; j < typesSize; j++) { + String type = in.readString(); + int fieldSize = in.readVInt(); + Map fieldMapBuilder = new HashMap<>(fieldSize); + for (int k = 0; k < fieldSize; k++) { + fieldMapBuilder.put(in.readString(), new FieldMappingMetaData(in.readString(), in.readBytesReference())); + } + typeMapBuilder.put(type, unmodifiableMap(fieldMapBuilder)); + } + indexMapBuilder.put(index, unmodifiableMap(typeMapBuilder)); + } + mappings = unmodifiableMap(indexMapBuilder); + + } + /** returns the retrieved field mapping. The return map keys are index, type, field (as specified in the request). 
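 * For example (an added illustration): mappings().get("my_index").get("_doc").get("title") would return the FieldMappingMetaData for field "title" as requested from type "_doc" of index "my_index".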
*/ public Map>> mappings() { return mappings; @@ -269,25 +293,7 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - Map>> indexMapBuilder = new HashMap<>(size); - for (int i = 0; i < size; i++) { - String index = in.readString(); - int typesSize = in.readVInt(); - Map> typeMapBuilder = new HashMap<>(typesSize); - for (int j = 0; j < typesSize; j++) { - String type = in.readString(); - int fieldSize = in.readVInt(); - Map fieldMapBuilder = new HashMap<>(fieldSize); - for (int k = 0; k < fieldSize; k++) { - fieldMapBuilder.put(in.readString(), new FieldMappingMetaData(in.readString(), in.readBytesReference())); - } - typeMapBuilder.put(type, unmodifiableMap(fieldMapBuilder)); - } - indexMapBuilder.put(index, unmodifiableMap(typeMapBuilder)); - } - mappings = unmodifiableMap(indexMapBuilder); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index c7415391675..61a598c361c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentHelper; @@ -123,8 +124,8 @@ public class TransportGetFieldMappingsIndexAction } @Override - protected GetFieldMappingsResponse newResponse() { - return new GetFieldMappingsResponse(); + protected Writeable.Reader getResponseReader() { + return GetFieldMappingsResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java index 13c9d94e7db..ba5618ce7de 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.explain; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; /** * Entry point for the explain feature. 
@@ -35,6 +36,11 @@ public class ExplainAction extends Action { @Override public ExplainResponse newResponse() { - return new ExplainResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return ExplainResponse::new; } } diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java index 5cecdf3a8b6..8ea7d0f12e3 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java @@ -60,6 +60,7 @@ public class ExplainResponse extends ActionResponse implements StatusToXContentO private Explanation explanation; private GetResult getResult; + // TODO(talevy): remove dependency on empty constructor from ExplainResponseTests ExplainResponse() { } @@ -80,6 +81,20 @@ public class ExplainResponse extends ActionResponse implements StatusToXContentO this.getResult = getResult; } + public ExplainResponse(StreamInput in) throws IOException { + super(in); + index = in.readString(); + type = in.readString(); + id = in.readString(); + exists = in.readBoolean(); + if (in.readBoolean()) { + explanation = readExplanation(in); + } + if (in.readBoolean()) { + getResult = GetResult.readGetResult(in); + } + } + public String getIndex() { return index; } @@ -123,17 +138,7 @@ public class ExplainResponse extends ActionResponse implements StatusToXContentO @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - index = in.readString(); - type = in.readString(); - id = in.readString(); - exists = in.readBoolean(); - if (in.readBoolean()) { - explanation = readExplanation(in); - } - if (in.readBoolean()) { - getResult = GetResult.readGetResult(in); - } + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index fe847532259..c29da21fe4a 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; @@ -152,8 +153,8 @@ public class TransportExplainAction extends TransportSingleShardAction getResponseReader() { + return ExplainResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index 01c21544047..274633b12a6 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import 
org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; @@ -114,8 +115,8 @@ public class TransportFieldCapabilitiesIndexAction extends TransportSingleShardA } @Override - protected FieldCapabilitiesIndexResponse newResponse() { - return new FieldCapabilitiesIndexResponse(); + protected Writeable.Reader getResponseReader() { + return FieldCapabilitiesIndexResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/get/GetAction.java b/server/src/main/java/org/elasticsearch/action/get/GetAction.java index a622fd5a817..05d1b6c5a4c 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.get; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class GetAction extends Action { @@ -32,6 +33,11 @@ public class GetAction extends Action { @Override public GetResponse newResponse() { - return new GetResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return GetResponse::new; } } diff --git a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java index b9383785678..3d340d455ce 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java @@ -48,7 +48,9 @@ public class GetResponse extends ActionResponse implements Iterable responses; - List failures; + final IntArrayList locations; + final List responses; + final List failures; MultiGetShardResponse() { locations = new IntArrayList(); @@ -40,6 +40,27 @@ public class MultiGetShardResponse extends ActionResponse { failures = new ArrayList<>(); } + MultiGetShardResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + locations = new IntArrayList(size); + responses = new ArrayList<>(size); + failures = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + locations.add(in.readVInt()); + if (in.readBoolean()) { + responses.add(new GetResponse(in)); + } else { + responses.add(null); + } + if (in.readBoolean()) { + failures.add(MultiGetResponse.Failure.readFailure(in)); + } else { + failures.add(null); + } + } + } + public void add(int location, GetResponse response) { locations.add(location); responses.add(response); @@ -54,26 +75,7 @@ public class MultiGetShardResponse extends ActionResponse { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - locations = new IntArrayList(size); - responses = new ArrayList<>(size); - failures = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - locations.add(in.readVInt()); - if (in.readBoolean()) { - GetResponse response = new GetResponse(); - response.readFrom(in); - responses.add(response); - } else { - responses.add(null); - } - if (in.readBoolean()) { - failures.add(MultiGetResponse.Failure.readFailure(in)); - } else { - failures.add(null); - } - } + throw new 
UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -96,4 +98,4 @@ public class MultiGetShardResponse extends ActionResponse { } } } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index 7bc736b69f3..65f42835f73 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.shard.IndexShard; @@ -108,8 +109,8 @@ public class TransportGetAction extends TransportSingleShardAction getResponseReader() { + return GetResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index 6c48b3b87c5..9b8ea6bd6ca 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.shard.IndexShard; @@ -57,8 +58,8 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction getResponseReader() { + return MultiGetShardResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 8b0e69bd457..3c2e7f9a49e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -120,16 +120,7 @@ public abstract class TransportSingleShardAction getResponseReader() { - return in -> { - Response response = newResponse(); - response.readFrom(in); - return response; - }; - } + protected abstract Writeable.Reader getResponseReader(); protected abstract boolean resolveIndex(Request request); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java index 3e32af7f2c2..14ac59cb132 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java @@ -105,8 +105,7 @@ public class MultiTermVectorsItemResponse implements Streamable { if (in.readBoolean()) { failure = 
MultiTermVectorsResponse.Failure.readFailure(in); } else { - response = new TermVectorsResponse(); - response.readFrom(in); + response = new TermVectorsResponse(in); } } @@ -120,4 +119,4 @@ public class MultiTermVectorsItemResponse implements Streamable { response.writeTo(out); } } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java index 346274c5925..2290ee9f52e 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java @@ -30,9 +30,9 @@ import java.util.List; public class MultiTermVectorsShardResponse extends ActionResponse { - IntArrayList locations; - List responses; - List failures; + final IntArrayList locations; + final List responses; + final List failures; MultiTermVectorsShardResponse() { locations = new IntArrayList(); @@ -40,6 +40,27 @@ public class MultiTermVectorsShardResponse extends ActionResponse { failures = new ArrayList<>(); } + MultiTermVectorsShardResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + locations = new IntArrayList(size); + responses = new ArrayList<>(size); + failures = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + locations.add(in.readVInt()); + if (in.readBoolean()) { + responses.add(new TermVectorsResponse(in)); + } else { + responses.add(null); + } + if (in.readBoolean()) { + failures.add(MultiTermVectorsResponse.Failure.readFailure(in)); + } else { + failures.add(null); + } + } + } + public void add(int location, TermVectorsResponse response) { locations.add(location); responses.add(response); @@ -54,26 +75,7 @@ public class MultiTermVectorsShardResponse extends ActionResponse { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - locations = new IntArrayList(size); - responses = new ArrayList<>(size); - failures = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - locations.add(in.readVInt()); - if (in.readBoolean()) { - TermVectorsResponse response = new TermVectorsResponse(); - response.readFrom(in); - responses.add(response); - } else { - responses.add(null); - } - if (in.readBoolean()) { - failures.add(MultiTermVectorsResponse.Failure.readFailure(in)); - } else { - failures.add(null); - } - } + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -96,4 +98,4 @@ public class MultiTermVectorsShardResponse extends ActionResponse { } } } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java index e701efe93ba..9b223eed3a3 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class TermVectorsAction extends Action { @@ -32,6 +33,11 @@ public class TermVectorsAction extends Action { @Override public TermVectorsResponse newResponse() { - return new TermVectorsResponse(); + throw new 
UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return TermVectorsResponse::new; } } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index 9159a07e83c..3d0fb75f8d3 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -103,6 +103,20 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj TermVectorsResponse() { } + TermVectorsResponse(StreamInput in) throws IOException { + index = in.readString(); + type = in.readString(); + id = in.readString(); + docVersion = in.readVLong(); + exists = in.readBoolean(); + artificial = in.readBoolean(); + tookInMillis = in.readVLong(); + if (in.readBoolean()) { + headerRef = in.readBytesReference(); + termVectors = in.readBytesReference(); + } + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); @@ -127,17 +141,7 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj @Override public void readFrom(StreamInput in) throws IOException { - index = in.readString(); - type = in.readString(); - id = in.readString(); - docVersion = in.readVLong(); - exists = in.readBoolean(); - artificial = in.readBoolean(); - tookInMillis = in.readVLong(); - if (in.readBoolean()) { - headerRef = in.readBytesReference(); - termVectors = in.readBytesReference(); - } + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } public Fields getFields() throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index e8d6c1bcb4f..0292757121e 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; @@ -58,8 +59,8 @@ public class TransportShardMultiTermsVectorAction extends } @Override - protected MultiTermVectorsShardResponse newResponse() { - return new MultiTermVectorsShardResponse(); + protected Writeable.Reader getResponseReader() { + return MultiTermVectorsShardResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java index d87a08a0541..0e212ab7cce 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator; import 
org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; @@ -110,8 +111,8 @@ public class TransportTermVectorsAction extends TransportSingleShardAction getResponseReader() { + return TermVectorsResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java index c503f1fa163..98d6c19dea6 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -119,8 +120,8 @@ public class RetentionLeaseActions { abstract void doRetentionLeaseAction(IndexShard indexShard, T request, ActionListener listener); @Override - protected Response newResponse() { - return new Response(); + protected Writeable.Reader getResponseReader() { + return Response::new; } @Override @@ -169,6 +170,10 @@ public class RetentionLeaseActions { ActionListener.map(listener, r -> new Response())); } + @Override + protected Writeable.Reader getResponseReader() { + return Response::new; + } } @Override @@ -392,6 +397,12 @@ public class RetentionLeaseActions { public static class Response extends ActionResponse { + public Response() { + } + + Response(StreamInput in) throws IOException { + super(in); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java index 2b8db458eb8..472b4ddb4a3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java @@ -23,11 +23,12 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRespon import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.Collections; @@ -38,7 +39,7 @@ import java.util.function.Predicate; import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; import static org.hamcrest.CoreMatchers.equalTo; -public class GetFieldMappingsResponseTests extends 
AbstractStreamableXContentTestCase { +public class GetFieldMappingsResponseTests extends AbstractSerializingTestCase { public void testManualSerialization() throws IOException { Map>> mappings = new HashMap<>(); @@ -48,9 +49,8 @@ public class GetFieldMappingsResponseTests extends AbstractStreamableXContentTes try (BytesStreamOutput out = new BytesStreamOutput()) { response.writeTo(out); - GetFieldMappingsResponse serialized = new GetFieldMappingsResponse(); try (StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes)) { - serialized.readFrom(in); + GetFieldMappingsResponse serialized = new GetFieldMappingsResponse(in); FieldMappingMetaData metaData = serialized.fieldMappings("index", "type", "field"); assertNotNull(metaData); assertEquals(new BytesArray("{}"), metaData.getSource()); @@ -106,13 +106,13 @@ public class GetFieldMappingsResponseTests extends AbstractStreamableXContentTes } @Override - protected GetFieldMappingsResponse createBlankInstance() { - return new GetFieldMappingsResponse(); + protected GetFieldMappingsResponse createTestInstance() { + return new GetFieldMappingsResponse(randomMapping()); } @Override - protected GetFieldMappingsResponse createTestInstance() { - return new GetFieldMappingsResponse(randomMapping()); + protected Writeable.Reader instanceReader() { + return GetFieldMappingsResponse::new; } @Override diff --git a/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java b/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java index 2a04a976677..9f1ee08844b 100644 --- a/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java @@ -23,13 +23,14 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.RandomObjects; import java.io.IOException; @@ -42,15 +43,16 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.equalTo; -public class ExplainResponseTests extends AbstractStreamableXContentTestCase { +public class ExplainResponseTests extends AbstractSerializingTestCase { + @Override protected ExplainResponse doParseInstance(XContentParser parser) throws IOException { return ExplainResponse.fromXContent(parser, randomBoolean()); } @Override - protected ExplainResponse createBlankInstance() { - return new ExplainResponse(); + protected Writeable.Reader instanceReader() { + return ExplainResponse::new; } @Override diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index 0cd9d3130f1..8ab452950a8 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -77,8 +77,7 @@ public class TermVectorsUnitTests extends ESTestCase { // read ByteArrayInputStream esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); InputStreamStreamInput esBuffer = new InputStreamStreamInput(esInBuffer); - TermVectorsResponse inResponse = new TermVectorsResponse("a", "b", "c"); - inResponse.readFrom(esBuffer); + TermVectorsResponse inResponse = new TermVectorsResponse(esBuffer); // see if correct checkIfStandardTermVector(inResponse); @@ -93,8 +92,7 @@ public class TermVectorsUnitTests extends ESTestCase { // read esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); esBuffer = new InputStreamStreamInput(esInBuffer); - inResponse = new TermVectorsResponse("a", "b", "c"); - inResponse.readFrom(esBuffer); + inResponse = new TermVectorsResponse(esBuffer); assertTrue(inResponse.isExists()); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 20b13474afa..584f0fd37aa 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; @@ -61,7 +62,12 @@ public class ShardChangesAction extends Action { @Override public Response newResponse() { - return new Response(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return Response::new; } public static class Request extends SingleShardRequest { @@ -246,6 +252,17 @@ public class ShardChangesAction extends Action { Response() { } + Response(StreamInput in) throws IOException { + super(in); + mappingVersion = in.readVLong(); + settingsVersion = in.readVLong(); + globalCheckpoint = in.readZLong(); + maxSeqNo = in.readZLong(); + maxSeqNoOfUpdatesOrDeletes = in.readZLong(); + operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new); + tookInMillis = in.readVLong(); + } + Response( final long mappingVersion, final long settingsVersion, @@ -265,15 +282,8 @@ public class ShardChangesAction extends Action { } @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - mappingVersion = in.readVLong(); - settingsVersion = in.readVLong(); - globalCheckpoint = in.readZLong(); - maxSeqNo = in.readZLong(); - maxSeqNoOfUpdatesOrDeletes = in.readZLong(); - operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new); - tookInMillis = in.readVLong(); + public void readFrom(final StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -459,8 +469,8 @@ public class ShardChangesAction extends Action { } @Override - protected Response newResponse() { - return new Response(); + protected Writeable.Reader getResponseReader() { + return 
Response::new; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java index 393548225a0..91ec057ac4e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java @@ -77,8 +77,8 @@ public class PutCcrRestoreSessionAction extends Action getResponseReader() { + return PutCcrRestoreSessionResponse::new; } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java index 0e48fc8e57c..a5b28caf9df 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java @@ -5,10 +5,11 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.AbstractWireSerializingTestCase; -public class ShardChangesResponseTests extends AbstractStreamableTestCase { +public class ShardChangesResponseTests extends AbstractWireSerializingTestCase { @Override protected ShardChangesAction.Response createTestInstance() { @@ -34,8 +35,7 @@ public class ShardChangesResponseTests extends AbstractStreamableTestCase instanceReader() { + return ShardChangesAction.Response::new; } - } From 46e0fa2dba953cd9bf9382e449abf34345f26d0e Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 9 May 2019 22:12:45 -0700 Subject: [PATCH 19/67] Improve jdk download tests (#42034) This commit reworks the JDK download tests to cover both the old and the new URL patterns from Oracle. Additionally, it creates only one repository per version, based on the old or new pattern, and restricts other repositories from trying to resolve JDKs.
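For illustration, the two URL layouts under test have roughly the following shape — a sketch only, with a hypothetical `buildJdkUrl` helper; the sample version strings, build number, and hash are the fake values the integration test serves via WireMock, not real Oracle artifacts:

[source,java]
----
// Sketch of the two Oracle download URL layouts (buildJdkUrl is hypothetical).
final class JdkUrlSketch {
    static String buildJdkUrl(String jdkMajor, String jdkVersion, String jdkBuild, String hash, String artifact) {
        if (hash == null) {
            // legacy layout from JDK 9 to 12, for version strings like "1+99":
            // https://download.oracle.com/java/GA/jdk1/99/GPL/<artifact>
            return "https://download.oracle.com/java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/" + artifact;
        }
        // layout since 12.0.1, for versions like "12.0.1+99@123456789123456789123456789abcde":
        // https://download.oracle.com/java/GA/jdk12.0.1/123456789123456789123456789abcde/99/GPL/<artifact>
        return "https://download.oracle.com/java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/" + artifact;
    }
}
----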
closes #41998 --- .../gradle/JdkDownloadPlugin.java | 52 +++++++++++------- .../gradle/JdkDownloadPluginIT.java | 47 ++++++++++------ ...4_bin.tar.gz => fake_openjdk_linux.tar.gz} | Bin ...x64_bin.tar.gz => fake_openjdk_osx.tar.gz} | Bin ...s-x64_bin.zip => fake_openjdk_windows.zip} | Bin 5 files changed, 64 insertions(+), 35 deletions(-) rename buildSrc/src/test/resources/org/elasticsearch/gradle/{openjdk-1.0.2_linux-x64_bin.tar.gz => fake_openjdk_linux.tar.gz} (100%) rename buildSrc/src/test/resources/org/elasticsearch/gradle/{openjdk-1.0.2_osx-x64_bin.tar.gz => fake_openjdk_osx.tar.gz} (100%) rename buildSrc/src/test/resources/org/elasticsearch/gradle/{openjdk-1.0.2_windows-x64_bin.zip => fake_openjdk_windows.zip} (100%) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java index a6372dfd231..a408b66ec81 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java @@ -27,6 +27,7 @@ import org.gradle.api.UnknownTaskException; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.ConfigurationContainer; import org.gradle.api.artifacts.dsl.DependencyHandler; +import org.gradle.api.artifacts.dsl.RepositoryHandler; import org.gradle.api.artifacts.repositories.IvyArtifactRepository; import org.gradle.api.file.CopySpec; import org.gradle.api.file.FileTree; @@ -45,6 +46,8 @@ import java.util.regex.Matcher; public class JdkDownloadPlugin implements Plugin { + private static final String REPO_NAME_PREFIX = "jdk_repo_"; + @Override public void apply(Project project) { NamedDomainObjectContainer jdksContainer = project.container(Jdk.class, name -> @@ -69,6 +72,13 @@ public class JdkDownloadPlugin implements Plugin { setupRootJdkDownload(project.getRootProject(), platform, version); } }); + + // all other repos should ignore the special jdk artifacts + project.getRootProject().getRepositories().all(repo -> { + if (repo.getName().startsWith(REPO_NAME_PREFIX) == false) { + repo.content(content -> content.excludeGroup("jdk")); + } + }); } private static void setupRootJdkDownload(Project rootProject, String platform, String version) { @@ -94,26 +104,30 @@ public class JdkDownloadPlugin implements Plugin { String hash = jdkVersionMatcher.group(5); // add fake ivy repo for jdk url - String repoName = "jdk_repo_" + version; + String repoName = REPO_NAME_PREFIX + version; + RepositoryHandler repositories = rootProject.getRepositories(); if (rootProject.getRepositories().findByName(repoName) == null) { - // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back - rootProject.getRepositories().ivy(ivyRepo -> { - ivyRepo.setName(repoName); - ivyRepo.setUrl("https://download.oracle.com"); - ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); - ivyRepo.patternLayout(layout -> - layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); - ivyRepo.content(content -> content.includeGroup("jdk")); - }); - // current pattern since 12.0.1 - rootProject.getRepositories().ivy(ivyRepo -> { - ivyRepo.setName(repoName + "_with_hash"); - ivyRepo.setUrl("https://download.oracle.com"); - ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); - ivyRepo.patternLayout(layout -> layout.artifact( - "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + 
"/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); - ivyRepo.content(content -> content.includeGroup("jdk")); - }); + if (hash != null) { + // current pattern since 12.0.1 + repositories.ivy(ivyRepo -> { + ivyRepo.setName(repoName); + ivyRepo.setUrl("https://download.oracle.com"); + ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); + ivyRepo.patternLayout(layout -> layout.artifact( + "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); + ivyRepo.content(content -> content.includeGroup("jdk")); + }); + } else { + // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back + repositories.ivy(ivyRepo -> { + ivyRepo.setName(repoName); + ivyRepo.setUrl("https://download.oracle.com"); + ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); + ivyRepo.patternLayout(layout -> + layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); + ivyRepo.content(content -> content.includeGroup("jdk")); + }); + } } // add the jdk as a "dependency" diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java index 5f982e1b47d..9d612da610a 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java @@ -41,20 +41,33 @@ import static org.hamcrest.CoreMatchers.equalTo; public class JdkDownloadPluginIT extends GradleIntegrationTestCase { - private static final String FAKE_JDK_VERSION = "1.0.2+99"; + private static final String OLD_JDK_VERSION = "1+99"; + private static final String JDK_VERSION = "12.0.1+99@123456789123456789123456789abcde"; private static final Pattern JDK_HOME_LOGLINE = Pattern.compile("JDK HOME: (.*)"); private static final Pattern NUM_CONFIGS_LOGLINE = Pattern.compile("NUM CONFIGS: (.*)"); public void testLinuxExtraction() throws IOException { - assertExtraction("getLinuxJdk", "linux", "bin/java"); + assertExtraction("getLinuxJdk", "linux", "bin/java", JDK_VERSION); } public void testDarwinExtraction() throws IOException { - assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java"); + assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java", JDK_VERSION); } public void testWindowsExtraction() throws IOException { - assertExtraction("getWindowsJdk", "windows", "bin/java"); + assertExtraction("getWindowsJdk", "windows", "bin/java", JDK_VERSION); + } + + public void testLinuxExtractionOldVersion() throws IOException { + assertExtraction("getLinuxJdk", "linux", "bin/java", OLD_JDK_VERSION); + } + + public void testDarwinExtractionOldVersion() throws IOException { + assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java", OLD_JDK_VERSION); + } + + public void testWindowsExtractionOldVersion() throws IOException { + assertExtraction("getWindowsJdk", "windows", "bin/java", OLD_JDK_VERSION); } public void testCrossProjectReuse() throws IOException { @@ -62,39 +75,41 @@ public class JdkDownloadPluginIT extends GradleIntegrationTestCase { Matcher matcher = NUM_CONFIGS_LOGLINE.matcher(result.getOutput()); assertTrue("could not find num configs in output: " + result.getOutput(), matcher.find()); assertThat(Integer.parseInt(matcher.group(1)), equalTo(6)); // 3 import configs, 3 export configs - }); + }, JDK_VERSION); } - public void assertExtraction(String taskname, String platform, String 
javaBin) throws IOException { + public void assertExtraction(String taskname, String platform, String javaBin, String version) throws IOException { runBuild(taskname, platform, result -> { Matcher matcher = JDK_HOME_LOGLINE.matcher(result.getOutput()); assertTrue("could not find jdk home in output: " + result.getOutput(), matcher.find()); String jdkHome = matcher.group(1); Path javaPath = Paths.get(jdkHome, javaBin); assertTrue(javaPath.toString(), Files.exists(javaPath)); - }); + }, version); } - private void runBuild(String taskname, String platform, Consumer assertions) throws IOException { + private void runBuild(String taskname, String platform, Consumer assertions, String version) throws IOException { WireMockServer wireMock = new WireMockServer(0); try { String extension = platform.equals("windows") ? "zip" : "tar.gz"; - String filename = "openjdk-1.0.2_" + platform + "-x64_bin." + extension; - wireMock.stubFor(head(urlEqualTo("/java/GA/jdk1/99/GPL/" + filename)) - .willReturn(aResponse().withStatus(200))); + boolean isOld = version.equals(OLD_JDK_VERSION); + String filename = "openjdk-" + (isOld ? "1" : "12.0.1") + "_" + platform + "-x64_bin." + extension; final byte[] filebytes; - try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream(filename)) { + try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream("fake_openjdk_" + platform + "." + extension)) { filebytes = stream.readAllBytes(); } - wireMock.stubFor(get(urlEqualTo("/java/GA/jdk1/99/GPL/" + filename)) - .willReturn(aResponse().withStatus(200).withBody(filebytes))); + String versionPath = isOld ? "jdk1/99" : "jdk12.0.1/123456789123456789123456789abcde/99"; + String urlPath = "/java/GA/" + versionPath + "/GPL/" + filename; + wireMock.stubFor(head(urlEqualTo(urlPath)).willReturn(aResponse().withStatus(200))); + wireMock.stubFor(get(urlEqualTo(urlPath)).willReturn(aResponse().withStatus(200).withBody(filebytes))); wireMock.start(); GradleRunner runner = GradleRunner.create().withProjectDir(getProjectDir("jdk-download")) .withArguments(taskname, "-Dlocal.repo.path=" + getLocalTestRepoPath(), - "-Dtests.jdk_version=" + FAKE_JDK_VERSION, - "-Dtests.jdk_repo=" + wireMock.baseUrl()) + "-Dtests.jdk_version=" + version, + "-Dtests.jdk_repo=" + wireMock.baseUrl(), + "-i") .withPluginClasspath(); BuildResult result = runner.build(); diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_linux-x64_bin.tar.gz b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_linux.tar.gz similarity index 100% rename from buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_linux-x64_bin.tar.gz rename to buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_linux.tar.gz diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_osx-x64_bin.tar.gz b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_osx.tar.gz similarity index 100% rename from buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_osx-x64_bin.tar.gz rename to buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_osx.tar.gz diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_windows-x64_bin.zip b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_windows.zip similarity index 100% rename from buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_windows-x64_bin.zip rename to buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_windows.zip From 
0e3617e0ee7f7718fd4079dd5d67c3667bc3dee0 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 9 May 2019 17:33:49 +0300 Subject: [PATCH 20/67] mute failing test Tracked in #41256 --- .../gradle/testclusters/TestClustersPluginIT.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 9276e8d215c..84b13340c35 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.gradle.testclusters; import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; +import org.junit.Ignore; import java.util.Arrays; @@ -81,6 +82,7 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase { ); } + @Ignore // https://github.com/elastic/elasticsearch/issues/41256 public void testMultiProject() { BuildResult result = getTestClustersRunner( "user1", "user2", "-s", "-i", "--parallel", "-Dlocal.repo.path=" + getLocalTestRepoPath() ); @@ -158,6 +160,7 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase { ); } + @Ignore // https://github.com/elastic/elasticsearch/issues/41256 public void testMultiNode() { BuildResult result = getTestClustersRunner(":multiNode").build(); assertTaskSuccessful(result, ":multiNode"); From 906999f1b61d8d0213352acbc0ebf9be36c9cafa Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 7 May 2019 22:50:11 -0700 Subject: [PATCH 21/67] Disable rhel8 in packaging tests (#41924) This commit disables testing of rhel 8 in the vagrant packaging tests. The vagrant image we use is a beta release, but RHEL 8 was just released, which has caused the package mirrors for the beta to stop working.
--- .../org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 0262c7d8151..30a8052b3f3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -30,7 +30,7 @@ class VagrantTestPlugin implements Plugin { 'oel-6', 'oel-7', 'opensuse-42', - 'rhel-8', + /* TODO: need a real RHEL license now that it is out of beta 'rhel-8',*/ 'sles-12', 'ubuntu-1604', 'ubuntu-1804' From dc444cef493f405883e58aa2a2b61d24c1e0e96d Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 10 May 2019 09:29:27 +0200 Subject: [PATCH 22/67] Fix Race in Closing IndicesService.CacheCleaner (#42016) (#42052) * When close becomes true while the management pool is shut down, we run into an unhandled `EsRejectedExecutionException` that fails tests * Found this while trying to reproduce #32506 * Running the IndexStatsIT in a loop is a way of reproducing this --- .../java/org/elasticsearch/indices/IndicesService.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index adcc70d741b..cacc95115d3 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -65,6 +65,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -1225,7 +1226,13 @@ public class IndicesService extends AbstractLifecycleComponent } // Reschedule itself to run again if not closed if (closed.get() == false) { - threadPool.schedule(this, interval, ThreadPool.Names.SAME); + try { + threadPool.schedule(this, interval, ThreadPool.Names.SAME); + } catch (EsRejectedExecutionException e) { + if (closed.get() == false) { + throw e; + } + } } } From ea7db2bb6ad6bf91906a3deeb1f4fbb304618038 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 10 May 2019 11:59:20 +0200 Subject: [PATCH 23/67] Fix testCloseOrDeleteIndexDuringSnapshot (#42007) * This test was resulting in a `PARTIAL` instead of a `SUCCESS` state for the case of closing an index during snapshotting on 7.x * The reason for this is the changed default behaviour regarding waiting for active shards between 8.0 and 7.x * Fixed by adjusting the waiting behaviour on the close index request in the test * Closes #39828 --- .../elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index ee6b56e81fe..001a83710dc 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ 
b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -2483,7 +2483,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas } else { logger.info("--> close index while partial snapshot is running"); closedOnPartial = true; - client.admin().indices().prepareClose("test-idx-1").get(); + client.admin().indices().prepareClose("test-idx-1").setWaitForActiveShards(ActiveShardCount.DEFAULT).get(); } } else { // non-partial snapshots do not allow close / delete operations on indices where snapshot has not been completed From db8fe1de003c031e5efaa32ddc6454dfb366732e Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 10 May 2019 14:24:42 +0300 Subject: [PATCH 24/67] Fix slow sync test clusters artifacts task (#42012) * Fix slow sync test clusters artifacts task The task was mistakenly adding a combinatorial explosion of task actions all doing the same thing. With this PR this is fixed and each version-distribution pair is only extracted once. I apologize for the SSD wear. * Look for configurations on the root project * Add dependency on configurations * This should be a `copy` so we don't blow away all the other distros * Don't copy example plugin build directory in integration tests --- .../testclusters/TestClustersPlugin.java | 61 +++++++++++-------- .../gradle/BuildExamplePluginsIT.java | 3 +- 2 files changed, 36 insertions(+), 28 deletions(-) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index c1ed6b770f0..daca1f5ebb1 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -182,8 +182,9 @@ public class TestClustersPlugin implements Plugin { claimsInventory.put(elasticsearchCluster, claimsInventory.getOrDefault(elasticsearchCluster, 0) + 1); } })); - - logger.info("Claims inventory: {}", claimsInventory); + if (claimsInventory.isEmpty() == false) { + logger.info("Claims inventory: {}", claimsInventory); + } }); } @@ -279,8 +280,14 @@ public class TestClustersPlugin implements Plugin { // the clusters will look for artifacts there based on the naming conventions. // Tasks that use a cluster will add this as a dependency automatically so it's guaranteed to run early in // the build.
- Task sync = Boilerplate.maybeCreate(rootProject.getTasks(), SYNC_ARTIFACTS_TASK_NAME, onCreate -> { + Boilerplate.maybeCreate(rootProject.getTasks(), SYNC_ARTIFACTS_TASK_NAME, onCreate -> { onCreate.getOutputs().dir(getExtractDir(rootProject)); + onCreate.getInputs().files( + project.getRootProject().getConfigurations().matching(conf -> conf.getName().startsWith(HELPER_CONFIGURATION_PREFIX)) + ); + onCreate.dependsOn(project.getRootProject().getConfigurations() + .matching(conf -> conf.getName().startsWith(HELPER_CONFIGURATION_PREFIX)) + ); // NOTE: Gradle doesn't allow a lambda here ( fails at runtime ) onCreate.doFirst(new Action() { @Override @@ -290,6 +297,31 @@ public class TestClustersPlugin implements Plugin { project.delete(getExtractDir(rootProject)); } }); + onCreate.doLast(new Action() { + @Override + public void execute(Task task) { + project.getRootProject().getConfigurations() + .matching(config -> config.getName().startsWith(HELPER_CONFIGURATION_PREFIX)) + .forEach(config -> project.copy(spec -> + config.getResolvedConfiguration() + .getResolvedArtifacts() + .forEach(resolvedArtifact -> { + final FileTree files; + File file = resolvedArtifact.getFile(); + if (file.getName().endsWith(".zip")) { + files = project.zipTree(file); + } else if (file.getName().endsWith("tar.gz")) { + files = project.tarTree(file); + } else { + throw new IllegalArgumentException("Can't extract " + file + " unknown file extension"); + } + logger.info("Extracting {}@{}", resolvedArtifact, config); + spec.from(files, s -> s.into(resolvedArtifact.getModuleVersion().getId().getGroup())); + spec.into(getExtractDir(project)); + })) + ); + } + }); }); // When the project evaluated we know of all tasks that use clusters. @@ -347,29 +379,6 @@ public class TestClustersPlugin implements Plugin { distribution.getFileExtension()); } - - sync.getInputs().files(helperConfiguration); - // NOTE: Gradle doesn't allow a lambda here ( fails at runtime ) - sync.doLast(new Action() { - @Override - public void execute(Task task) { - project.copy(spec -> - helperConfiguration.getResolvedConfiguration().getResolvedArtifacts().forEach(resolvedArtifact -> { - final FileTree files; - File file = resolvedArtifact.getFile(); - if (file.getName().endsWith(".zip")) { - files = project.zipTree(file); - } else if (file.getName().endsWith("tar.gz")) { - files = project.tarTree(file); - } else { - throw new IllegalArgumentException("Can't extract " + file + " unknown file extension"); - } - - spec.from(files, s -> s.into(resolvedArtifact.getModuleVersion().getId().getGroup())); - spec.into(getExtractDir(project)); - })); - } - }); }))); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index 762bcc5ff9b..bf982fa3aa2 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -75,7 +75,7 @@ public class BuildExamplePluginsIT extends GradleIntegrationTestCase { } public void testCurrentExamplePlugin() throws IOException { - FileUtils.copyDirectory(examplePlugin, tmpDir.getRoot()); + FileUtils.copyDirectory(examplePlugin, tmpDir.getRoot(), pathname -> pathname.getPath().contains("/build/") == false); adaptBuildScriptForTest(); @@ -156,5 +156,4 @@ public class BuildExamplePluginsIT extends GradleIntegrationTestCase { throw new RuntimeException(e); } } - } From 1be5bb5bfde5a97f6618d1e61298426f57390065 Mon Sep 17 
00:00:00 2001 From: David Turner Date: Fri, 10 May 2019 13:55:27 +0100 Subject: [PATCH 25/67] Recognise direct buffers in heap size docs (#42070) This commit slightly reworks the recommendations in the docs about setting the heap size: * the "rules of thumb" are actually instructions that should be followed * the reason for setting `Xmx` to 50% of the heap size is more subtle than just leaving space for the filesystem cache * it is normal to see Elasticsearch using more memory than `Xmx` * replace `cutoff` and `limit` with `threshold` since all three terms are used interchangeably * since we recommend setting `Xmx` equal to `Xms`, avoid talking about setting `Xmx` in isolation Relates #41954 --- .../important-settings/heap-size.asciidoc | 52 +++++++++++-------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/docs/reference/setup/important-settings/heap-size.asciidoc b/docs/reference/setup/important-settings/heap-size.asciidoc index 77aa23b61df..890a9786e09 100644 --- a/docs/reference/setup/important-settings/heap-size.asciidoc +++ b/docs/reference/setup/important-settings/heap-size.asciidoc @@ -7,42 +7,48 @@ to ensure that Elasticsearch has enough heap available. Elasticsearch will assign the entire heap specified in <> via the `Xms` (minimum heap size) and `Xmx` (maximum -heap size) settings. +heap size) settings. You should set these two settings to be equal to each +other. The value for these setting depends on the amount of RAM available on your -server. Good rules of thumb are: +server: -* Set the minimum heap size (`Xms`) and maximum heap size (`Xmx`) to be equal to - each other. +* Set `Xmx` and `Xms` to no more than 50% of your physical RAM. {es} requires + memory for purposes other than the JVM heap and it is important to leave + space for this. For instance, {es} uses off-heap buffers for efficient + network communication, relies on the operating system's filesystem cache for + efficient access to files, and the JVM itself requires some memory too. It is + normal to observe the {es} process using more memory than the limit + configured with the `Xmx` setting. -* The more heap available to Elasticsearch, the more memory it can use for - caching. But note that too much heap can subject you to long garbage - collection pauses. - -* Set `Xmx` to no more than 50% of your physical RAM, to ensure that there is - enough physical RAM left for kernel file system caches. - -* Don’t set `Xmx` to above the cutoff that the JVM uses for compressed object - pointers (compressed oops); the exact cutoff varies but is near 32 GB. You can - verify that you are under the limit by looking for a line in the logs like the - following: +* Set `Xmx` and `Xms` to no more than the threshold that the JVM uses for + compressed object pointers (compressed oops); the exact threshold varies but + is near 32 GB. You can verify that you are under the threshold by looking for a + line in the logs like the following: + heap size [1.9gb], compressed ordinary object pointers [true] -* Even better, try to stay below the threshold for zero-based compressed oops; - the exact cutoff varies but 26 GB is safe on most systems, but can be as large - as 30 GB on some systems. 
You can verify that you are under the limit by - starting Elasticsearch with the JVM options `-XX:+UnlockDiagnosticVMOptions - -XX:+PrintCompressedOopsMode` and looking for a line like the following: +* Ideally set `Xmx` and `Xms` to no more than the threshold for zero-based + compressed oops; the exact threshold varies but 26 GB is safe on most + systems, but can be as large as 30 GB on some systems. You can verify that + you are under this threshold by starting {es} with the JVM options + `-XX:+UnlockDiagnosticVMOptions -XX:+PrintCompressedOopsMode` and looking for + a line like the following: + -- heap address: 0x000000011be00000, size: 27648 MB, zero based Compressed Oops -showing that zero-based compressed oops are enabled instead of +showing that zero-based compressed oops are enabled. If zero-based compressed +oops are not enabled then you will see a line like the following instead: heap address: 0x0000000118400000, size: 28672 MB, Compressed Oops with base: 0x00000001183ff000 -- +The more heap available to {es}, the more memory it can use for its internal +caches, but the less memory it leaves available for the operating system to use +for the filesystem cache. Also, larger heaps can cause longer garbage +collection pauses. + Here are examples of how to set the heap size via the jvm.options file: [source,txt] @@ -66,7 +72,7 @@ ES_JAVA_OPTS="-Xms4000m -Xmx4000m" ./bin/elasticsearch <2> <2> Set the minimum and maximum heap size to 4000 MB. NOTE: Configuring the heap for the <> is -different than the above. The values initially populated for the Windows service -can be configured as above but are different after the service has been +different than the above. The values initially populated for the Windows +service can be configured as above but are different after the service has been installed. Consult the <> for additional details. From 2a8a64d3f1c605b8694f0c8eceae4fc05f61bac8 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 10 May 2019 14:02:09 +0100 Subject: [PATCH 26/67] Remove extra `ms` from log message (#42068) This log message logs a `TimeValue` which includes units, but also logs an extra `ms`. This commit removes the extra `ms`. --- .../java/org/elasticsearch/cluster/coordination/JoinHelper.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index b2206d4b426..8615258cc37 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -243,7 +243,7 @@ public class JoinHelper { } void logWarnWithTimestamp() { - logger.info(() -> new ParameterizedMessage("last failed join attempt was {} ms ago, failed to join {} with {}", + logger.info(() -> new ParameterizedMessage("last failed join attempt was {} ago, failed to join {} with {}", TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - timestamp)), destination, joinRequest), From 809ed3b721f8e86ddffb65e00eb6baba234bb53f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 10 May 2019 08:50:55 -0400 Subject: [PATCH 27/67] shouldRollGeneration should execute under read lock (#41696) Translog#shouldRollGeneration should execute under the read lock since it accesses the current writer. 
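The race being fixed is the classic one of reading mutable state outside its lock; reduced to plain JDK locks, the pattern looks roughly like this (a standalone sketch, with `currentSizeInBytes` standing in for the current translog writer's state — not code from this change):

[source,java]
----
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Standalone sketch of the locking pattern behind the fix.
final class GenerationRollCheck {
    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    private long currentSizeInBytes; // mutated only while holding the write lock

    boolean shouldRoll(long thresholdBytes) {
        lock.readLock().lock();
        try {
            // reading the size outside the read lock would race with a
            // concurrent generation roll swapping the current writer
            return currentSizeInBytes > thresholdBytes;
        } finally {
            lock.readLock().unlock();
        }
    }

    void append(long bytes) {
        lock.writeLock().lock();
        try {
            currentSizeInBytes += bytes;
        } finally {
            lock.writeLock().unlock();
        }
    }
}
----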
--- .../main/java/org/elasticsearch/index/translog/Translog.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 63d21ffea15..4d23ce8cdc0 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -561,9 +561,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * @return {@code true} if the current generation should be rolled to a new generation */ public boolean shouldRollGeneration() { - final long size = this.current.sizeInBytes(); final long threshold = this.indexSettings.getGenerationThresholdSize().getBytes(); - return size > threshold; + try (ReleasableLock ignored = readLock.acquire()) { + return this.current.sizeInBytes() > threshold; + } } /** From 44c34185319e79c1831edd2653b6b3a46acf1d9c Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Fri, 10 May 2019 14:38:10 +0100 Subject: [PATCH 28/67] Simplify handling of keyword field normalizers (#42002) We have a number of places in analysis-handling code where we check if a field type is a keyword field, and if so then extract the normalizer rather than pulling the index-time analyzer. However, a keyword normalizer is really just a special case of an analyzer, so we should be able to simplify this by setting the normalizer as the index-time analyzer at construction time. --- .../subphase/highlight/AnnotatedTextHighlighter.java | 4 ++-- .../indices/analyze/TransportAnalyzeAction.java | 10 ++-------- .../index/mapper/KeywordFieldMapper.java | 3 ++- .../index/termvectors/TermVectorsService.java | 8 +------- .../fetch/subphase/highlight/HighlightUtils.java | 12 ------------ .../fetch/subphase/highlight/PlainHighlighter.java | 2 +- .../fetch/subphase/highlight/UnifiedHighlighter.java | 6 +++--- .../index/mapper/KeywordFieldMapperTests.java | 3 ++- 8 files changed, 13 insertions(+), 35 deletions(-) diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java index 2ba7838b909..6b1a1c9254c 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java @@ -39,8 +39,8 @@ public class AnnotatedTextHighlighter extends UnifiedHighlighter { public static final String NAME = "annotated"; @Override - protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type, HitContext hitContext) { - return new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, type, hitContext), hitContext); + protected Analyzer getAnalyzer(DocumentMapper docMapper, HitContext hitContext) { + return new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, hitContext), hitContext); } // Convert the marked-up values held on-disk to plain-text versions for highlighting diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 62d8c0e91da..55bd5937426 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -50,9 +50,9 @@ import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.CustomAnalyzer; import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NormalizingCharFilterFactory; import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory; -import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -141,14 +141,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction fragsList = new ArrayList<>(); List textsToHighlight; - Analyzer analyzer = HighlightUtils.getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType); + Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer(); final int maxAnalyzedOffset = context.indexShard().indexSettings().getHighlightMaxAnalyzedOffset(); try { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java index 2a75e9c58f4..b806fb9cd31 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java @@ -70,7 +70,7 @@ public class UnifiedHighlighter implements Highlighter { int numberOfFragments; try { - final Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType, + final Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), hitContext); List fieldValues = loadFieldValues(fieldType, field, context, hitContext); if (fieldValues.size() == 0) { @@ -150,8 +150,8 @@ public class UnifiedHighlighter implements Highlighter { } - protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type, HitContext hitContext) { - return HighlightUtils.getAnalyzer(docMapper, type); + protected Analyzer getAnalyzer(DocumentMapper docMapper, HitContext hitContext) { + return docMapper.mappers().indexAnalyzer(); } protected List loadFieldValues(MappedFieldType fieldType, SearchContextHighlight.Field field, SearchContext context, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index dd7cb17ef12..1bdf40bcc67 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -401,7 +401,8 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase { () -> indexService.mapperService().merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); assertEquals( - "Mapper for [field] conflicts with existing mapping:\n[mapper [field] has different [normalizer]]", + "Mapper for [field] conflicts with existing mapping:\n" + + "[mapper [field] has different 
[analyzer], mapper [field] has different [normalizer]]", e.getMessage()); } From 80432a35520c8cfe9d93ee45e7bf70a10be8ffd5 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Fri, 10 May 2019 08:43:35 -0600 Subject: [PATCH 29/67] Remove close method in PageCacheRecycler/Recycler (#41917) The changes in #39317 brought to light some concurrency issues in the close method of Recyclers as we do not wait for threads running in the threadpool to be finished prior to the closing of the PageCacheRecycler and the Recyclers that are used internally. #41695 was opened to address the concurrent close issues but upon review, the closing of these classes is not really needed as the instances should become available for garbage collection once there is no longer a reference to the closed node. Closes #41683 --- .../client/transport/TransportClient.java | 2 -- .../common/recycler/AbstractRecycler.java | 5 ----- .../common/recycler/ConcurrentDequeRecycler.java | 7 ------- .../elasticsearch/common/recycler/DequeRecycler.java | 10 ---------- .../elasticsearch/common/recycler/FilterRecycler.java | 5 ----- .../elasticsearch/common/recycler/NoneRecycler.java | 5 ----- .../org/elasticsearch/common/recycler/Recycler.java | 4 +--- .../org/elasticsearch/common/recycler/Recyclers.java | 7 ------- .../elasticsearch/common/util/PageCacheRecycler.java | 9 +-------- server/src/main/java/org/elasticsearch/node/Node.java | 3 --- .../common/recycler/AbstractRecyclerTestCase.java | 10 ---------- 11 files changed, 2 insertions(+), 65 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java index b5720c023f0..4c2f4932de2 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -184,7 +184,6 @@ public abstract class TransportClient extends AbstractClient { resourcesToClose.add(circuitBreakerService); PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings); BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.REQUEST); - resourcesToClose.add(pageCacheRecycler); modules.add(settingsModule); NetworkModule networkModule = new NetworkModule(settings, true, pluginsService.filterPlugins(NetworkPlugin.class), threadPool, bigArrays, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, null); @@ -376,7 +375,6 @@ public abstract class TransportClient extends AbstractClient { closeables.add(plugin); } closeables.add(() -> ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS)); - closeables.add(injector.getInstance(PageCacheRecycler.class)); IOUtils.closeWhileHandlingException(closeables); } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java b/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java index 05fa5259726..546d801d70b 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java @@ -28,9 +28,4 @@ abstract class AbstractRecycler implements Recycler { this.c = c; } - @Override - public void close() { - // no-op by default - } - } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java
b/server/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java index 04103c5e274..54374cc3bde 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java @@ -37,13 +37,6 @@ public class ConcurrentDequeRecycler extends DequeRecycler { this.size = new AtomicInteger(); } - @Override - public void close() { - assert deque.size() == size.get(); - super.close(); - size.set(0); - } - @Override public V obtain() { final V v = super.obtain(); diff --git a/server/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java b/server/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java index a40befe9d81..0f201133ece 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java @@ -36,16 +36,6 @@ public class DequeRecycler extends AbstractRecycler { this.maxSize = maxSize; } - @Override - public void close() { - // call destroy() for every cached object - for (T t : deque) { - c.destroy(t); - } - // finally get rid of all references - deque.clear(); - } - @Override public V obtain() { final T v = deque.pollFirst(); diff --git a/server/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java b/server/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java index 426185173e5..5011402f6d9 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java @@ -34,9 +34,4 @@ abstract class FilterRecycler implements Recycler { return wrap(getDelegate().obtain()); } - @Override - public void close() { - getDelegate().close(); - } - } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java b/server/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java index 865182b88e1..102f1d42430 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java @@ -31,11 +31,6 @@ public class NoneRecycler extends AbstractRecycler { return new NV<>(c.newInstance()); } - @Override - public void close() { - // no-op - } - public static class NV implements Recycler.V { T value; diff --git a/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java b/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java index 161e6463423..95a67fdf8e0 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java @@ -25,7 +25,7 @@ import org.elasticsearch.common.lease.Releasable; * A recycled object, note, implementations should support calling obtain and then recycle * on different threads. 
*/ -public interface Recycler extends Releasable { +public interface Recycler { interface Factory { Recycler build(); @@ -53,8 +53,6 @@ public interface Recycler extends Releasable { } - void close(); - V obtain(); } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java index 3ea9d17c25f..5bfd3448e23 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java @@ -145,13 +145,6 @@ public enum Recyclers { return recyclers[slot()]; } - @Override - public void close() { - for (Recycler recycler : recyclers) { - recycler.close(); - } - } - }; } diff --git a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java index 4ca408e0441..40b9a8c7e94 100644 --- a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java @@ -20,8 +20,6 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.recycler.AbstractRecyclerC; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Setting; @@ -39,7 +37,7 @@ import static org.elasticsearch.common.recycler.Recyclers.dequeFactory; import static org.elasticsearch.common.recycler.Recyclers.none; /** A recycler of fixed-size pages. */ -public class PageCacheRecycler implements Releasable { +public class PageCacheRecycler { public static final Setting TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope); @@ -73,11 +71,6 @@ public class PageCacheRecycler implements Releasable { NON_RECYCLING_INSTANCE = new PageCacheRecycler(Settings.builder().put(LIMIT_HEAP_SETTING.getKey(), "0%").build()); } - @Override - public void close() { - Releasables.close(true, bytePage, intPage, longPage, objectPage); - } - public PageCacheRecycler(Settings settings) { final Type type = TYPE_SETTING.get(settings); final long limit = LIMIT_HEAP_SETTING.get(settings).getBytes(); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index b79cad68dfa..699d032e35e 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -376,7 +376,6 @@ public class Node implements Closeable { PageCacheRecycler pageCacheRecycler = createPageCacheRecycler(settings); BigArrays bigArrays = createBigArrays(pageCacheRecycler, circuitBreakerService); - resourcesToClose.add(pageCacheRecycler); modules.add(settingsModule); List namedWriteables = Stream.of( NetworkModule.getNamedWriteables().stream(), @@ -842,8 +841,6 @@ public class Node implements Closeable { toClose.add(() -> stopWatch.stop().start("node_environment")); toClose.add(injector.getInstance(NodeEnvironment.class)); - toClose.add(() -> stopWatch.stop().start("page_cache_recycler")); - toClose.add(injector.getInstance(PageCacheRecycler.class)); toClose.add(stopWatch::stop); if (logger.isTraceEnabled()) { diff --git a/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java 
b/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java index be7799fcd6c..d2d12b32da4 100644 --- a/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java @@ -99,7 +99,6 @@ public abstract class AbstractRecyclerTestCase extends ESTestCase { assertNotSame(b1, b2); } o.close(); - r.close(); } public void testRecycle() { @@ -111,7 +110,6 @@ public abstract class AbstractRecyclerTestCase extends ESTestCase { o = r.obtain(); assertRecycled(o.v()); o.close(); - r.close(); } public void testDoubleRelease() { @@ -128,7 +126,6 @@ public abstract class AbstractRecyclerTestCase extends ESTestCase { final Recycler.V v2 = r.obtain(); final Recycler.V v3 = r.obtain(); assertNotSame(v2.v(), v3.v()); - r.close(); } public void testDestroyWhenOverCapacity() { @@ -152,9 +149,6 @@ public abstract class AbstractRecyclerTestCase extends ESTestCase { // release first ref, verify for destruction o.close(); assertDead(data); - - // close the rest - r.close(); } public void testClose() { @@ -171,10 +165,6 @@ public abstract class AbstractRecyclerTestCase extends ESTestCase { // verify that recycle() ran assertRecycled(data); - - // closing the recycler should mark recycled instances via destroy() - r.close(); - assertDead(data); } } From cd5f1b53e8f6052f6175f8f922fcfd40f518752a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 10 May 2019 11:47:05 -0400 Subject: [PATCH 30/67] Remove reference to fs.data.spins in docs We long ago removed fs.data.spins from the nodes stats. This commit removes the reference to it in the docs. --- docs/reference/cluster/nodes-stats.asciidoc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 4bd3c2c9647..bb24dffd40f 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -125,11 +125,6 @@ information that concern the file system: `fs.data.available_in_bytes`:: Total number of bytes available to this Java virtual machine on this file store -`fs.data.spins` (Linux only):: - Indicates if the file store is backed by spinning storage. - `null` means we could not determine it, `true` means the device possibly spins - and `false` means it does not (ex: solid-state disks). - `fs.io_stats.devices` (Linux only):: Array of disk metrics for each device that is backing an Elasticsearch data path. These disk metrics are probed periodically From 3e59c31a127d387148e62879c14c681a80c5b9b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 10 May 2019 18:07:05 +0200 Subject: [PATCH 31/67] Change IndexAnalyzers default analyzer access (#42011) Currently, IndexAnalyzers keeps the three default analyzers as separate class members, although they should refer to the same analyzers held in the additional analyzers map under the default names. This assumption should be made more explicit by keeping all analyzers in the map. This change adapts the constructor to check that all the default entries are present, and the getters to reach into the map with the default names when needed.
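Condensed, the new shape is roughly the following — a sketch using the reserved default name, not the complete class; `NamedAnalyzer` is the existing Elasticsearch type, everything else here is illustrative:

[source,java]
----
import java.util.Collections;
import java.util.Map;
import java.util.Objects;

import org.elasticsearch.index.analysis.NamedAnalyzer;

// Sketch: all analyzers, defaults included, live in one map keyed by name,
// and the default getters simply look up the reserved names.
final class IndexAnalyzersSketch {
    private static final String DEFAULT_ANALYZER_NAME = "default";

    private final Map<String, NamedAnalyzer> analyzers;

    IndexAnalyzersSketch(Map<String, NamedAnalyzer> analyzers) {
        // fail fast if the default entry is missing, instead of keeping a separate field
        Objects.requireNonNull(analyzers.get(DEFAULT_ANALYZER_NAME), "the default analyzer must be set");
        this.analyzers = Collections.unmodifiableMap(analyzers);
    }

    NamedAnalyzer getDefaultIndexAnalyzer() {
        // no dedicated member any more: reach into the map by the reserved name
        return analyzers.get(DEFAULT_ANALYZER_NAME);
    }
}
----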
--- .../metadata/MetaDataIndexUpgradeService.java | 2 +- .../index/analysis/AnalysisRegistry.java | 29 +++---- .../index/analysis/IndexAnalyzers.java | 29 +++---- .../index/analysis/AnalysisRegistryTests.java | 4 +- .../index/analysis/IndexAnalyzersTests.java | 87 +++++++++++++++++++ .../index/mapper/TypeParsersTests.java | 41 +++++---- .../index/engine/TranslogHandler.java | 7 +- 7 files changed, 144 insertions(+), 55 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/index/analysis/IndexAnalyzersTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index d3520da6702..483835f633e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -195,7 +195,7 @@ public class MetaDataIndexUpgradeService { } }; try (IndexAnalyzers fakeIndexAnalzyers = - new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap, analyzerMap)) { + new IndexAnalyzers(indexSettings, analyzerMap, analyzerMap, analyzerMap)) { MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService, mapperRegistry, () -> null); mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index b0d9c778d2a..f84b64f20fa 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -52,6 +52,11 @@ public final class AnalysisRegistry implements Closeable { public static final String INDEX_ANALYSIS_CHAR_FILTER = "index.analysis.char_filter"; public static final String INDEX_ANALYSIS_FILTER = "index.analysis.filter"; public static final String INDEX_ANALYSIS_TOKENIZER = "index.analysis.tokenizer"; + + public static final String DEFAULT_ANALYZER_NAME = "default"; + public static final String DEFAULT_SEARCH_ANALYZER_NAME = "default_search"; + public static final String DEFAULT_SEARCH_QUOTED_ANALYZER_NAME = "default_search_quoted"; + private final PrebuiltAnalysis prebuiltAnalysis; private final Map cachedAnalyzer = new ConcurrentHashMap<>(); @@ -442,37 +447,29 @@ public final class AnalysisRegistry implements Closeable { "whitespace", () -> new WhitespaceTokenizer(), tokenFilterFactoryFactories, charFilterFactoryFactories); } - if (!analyzers.containsKey("default")) { - NamedAnalyzer defaultAnalyzer = produceAnalyzer("default", new StandardAnalyzerProvider(indexSettings, null, "default", - Settings.Builder.EMPTY_SETTINGS), tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); - analyzers.put("default", defaultAnalyzer); + if (!analyzers.containsKey(DEFAULT_ANALYZER_NAME)) { + analyzers.put(DEFAULT_ANALYZER_NAME, + produceAnalyzer(DEFAULT_ANALYZER_NAME, + new StandardAnalyzerProvider(indexSettings, null, DEFAULT_ANALYZER_NAME, Settings.Builder.EMPTY_SETTINGS), + tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories)); } - if (!analyzers.containsKey("default_search")) { - analyzers.put("default_search", analyzers.get("default")); - } - if (!analyzers.containsKey("default_search_quoted")) { - 
analyzers.put("default_search_quoted", analyzers.get("default_search")); - } - - NamedAnalyzer defaultAnalyzer = analyzers.get("default"); + NamedAnalyzer defaultAnalyzer = analyzers.get(DEFAULT_ANALYZER_NAME); if (defaultAnalyzer == null) { throw new IllegalArgumentException("no default analyzer configured"); } defaultAnalyzer.checkAllowedInMode(AnalysisMode.ALL); + if (analyzers.containsKey("default_index")) { throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use " + "[index.analysis.analyzer.default] instead for index [" + index.getName() + "]"); } - NamedAnalyzer defaultSearchAnalyzer = analyzers.getOrDefault("default_search", defaultAnalyzer); - NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.getOrDefault("default_search_quote", defaultSearchAnalyzer); for (Map.Entry analyzer : analyzers.entrySet()) { if (analyzer.getKey().startsWith("_")) { throw new IllegalArgumentException("analyzer name must not start with '_'. got \"" + analyzer.getKey() + "\""); } } - return new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultSearchAnalyzer, defaultSearchQuoteAnalyzer, analyzers, normalizers, - whitespaceNormalizers); + return new IndexAnalyzers(indexSettings, analyzers, normalizers, whitespaceNormalizers); } private static NamedAnalyzer produceAnalyzer(String name, AnalyzerProvider analyzerFactory, diff --git a/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java b/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java index 4cb0b9aa324..4f1cbeb4022 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java @@ -25,9 +25,13 @@ import org.elasticsearch.index.IndexSettings; import java.io.Closeable; import java.io.IOException; import java.util.Map; +import java.util.Objects; import java.util.stream.Stream; import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_ANALYZER_NAME; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_ANALYZER_NAME; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_QUOTED_ANALYZER_NAME; /** * IndexAnalyzers contains a name to analyzer mapping for a specific index. 
@@ -37,23 +41,18 @@ import static java.util.Collections.unmodifiableMap; * @see AnalysisRegistry */ public final class IndexAnalyzers extends AbstractIndexComponent implements Closeable { - private final NamedAnalyzer defaultIndexAnalyzer; - private final NamedAnalyzer defaultSearchAnalyzer; - private final NamedAnalyzer defaultSearchQuoteAnalyzer; private final Map analyzers; private final Map normalizers; private final Map whitespaceNormalizers; - public IndexAnalyzers(IndexSettings indexSettings, NamedAnalyzer defaultIndexAnalyzer, NamedAnalyzer defaultSearchAnalyzer, - NamedAnalyzer defaultSearchQuoteAnalyzer, Map analyzers, - Map normalizers, Map whitespaceNormalizers) { + public IndexAnalyzers(IndexSettings indexSettings, Map analyzers, Map normalizers, + Map whitespaceNormalizers) { super(indexSettings); - if (defaultIndexAnalyzer.name().equals("default") == false) { - throw new IllegalStateException("default analyzer must have the name [default] but was: [" + defaultIndexAnalyzer.name() + "]"); + Objects.requireNonNull(analyzers.get(DEFAULT_ANALYZER_NAME), "the default analyzer must be set"); + if (analyzers.get(DEFAULT_ANALYZER_NAME).name().equals(DEFAULT_ANALYZER_NAME) == false) { + throw new IllegalStateException( + "default analyzer must have the name [default] but was: [" + analyzers.get(DEFAULT_ANALYZER_NAME).name() + "]"); } - this.defaultIndexAnalyzer = defaultIndexAnalyzer; - this.defaultSearchAnalyzer = defaultSearchAnalyzer; - this.defaultSearchQuoteAnalyzer = defaultSearchQuoteAnalyzer; this.analyzers = unmodifiableMap(analyzers); this.normalizers = unmodifiableMap(normalizers); this.whitespaceNormalizers = unmodifiableMap(whitespaceNormalizers); @@ -84,21 +83,21 @@ public final class IndexAnalyzers extends AbstractIndexComponent implements Clos * Returns the default index analyzer for this index */ public NamedAnalyzer getDefaultIndexAnalyzer() { - return defaultIndexAnalyzer; + return analyzers.get(DEFAULT_ANALYZER_NAME); } /** - * Returns the default search analyzer for this index + * Returns the default search analyzer for this index. 
If not set, this will return the default analyzer */ public NamedAnalyzer getDefaultSearchAnalyzer() { - return defaultSearchAnalyzer; + return analyzers.getOrDefault(DEFAULT_SEARCH_ANALYZER_NAME, getDefaultIndexAnalyzer()); } /** * Returns the default search quote analyzer for this index */ public NamedAnalyzer getDefaultSearchQuoteAnalyzer() { - return defaultSearchQuoteAnalyzer; + return analyzers.getOrDefault(DEFAULT_SEARCH_QUOTED_ANALYZER_NAME, getDefaultSearchAnalyzer()); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index b836a5d0372..0d4fde38291 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -123,9 +123,9 @@ public class AnalysisRegistryTests extends ESTestCase { Analyzer analyzer = new CustomAnalyzer("tokenizerName", null, new CharFilterFactory[0], new TokenFilterFactory[] { tokenFilter }); MapperException ex = expectThrows(MapperException.class, () -> emptyRegistry.build(IndexSettingsModule.newIndexSettings("index", settings), - singletonMap("default", new PreBuiltAnalyzerProvider("my_analyzer", AnalyzerScope.INDEX, analyzer)), emptyMap(), + singletonMap("default", new PreBuiltAnalyzerProvider("default", AnalyzerScope.INDEX, analyzer)), emptyMap(), emptyMap(), emptyMap(), emptyMap())); - assertEquals("analyzer [my_analyzer] contains filters [my_filter] that are not allowed to run in all mode.", ex.getMessage()); + assertEquals("analyzer [default] contains filters [my_filter] that are not allowed to run in all mode.", ex.getMessage()); } public void testOverrideDefaultIndexAnalyzerIsUnsupported() { diff --git a/server/src/test/java/org/elasticsearch/index/analysis/IndexAnalyzersTests.java b/server/src/test/java/org/elasticsearch/index/analysis/IndexAnalyzersTests.java new file mode 100644 index 00000000000..f5808c4b0da --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/analysis/IndexAnalyzersTests.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class IndexAnalyzersTests extends ESTestCase { + + /** + * test the checks in the constructor + */ + public void testAnalyzerMapChecks() { + Map analyzers = new HashMap<>(); + { + NullPointerException ex = expectThrows(NullPointerException.class, + () -> new IndexAnalyzers(IndexSettingsModule.newIndexSettings("index", Settings.EMPTY), analyzers, + Collections.emptyMap(), Collections.emptyMap())); + assertEquals("the default analyzer must be set", ex.getMessage()); + } + { + analyzers.put(AnalysisRegistry.DEFAULT_ANALYZER_NAME, + new NamedAnalyzer("otherName", AnalyzerScope.INDEX, new StandardAnalyzer())); + IllegalStateException ex = expectThrows(IllegalStateException.class, + () -> new IndexAnalyzers(IndexSettingsModule.newIndexSettings("index", Settings.EMPTY), analyzers, + Collections.emptyMap(), Collections.emptyMap())); + assertEquals("default analyzer must have the name [default] but was: [otherName]", ex.getMessage()); + } + } + + public void testAnalyzerDefaults() throws IOException { + Map analyzers = new HashMap<>(); + NamedAnalyzer analyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()); + analyzers.put(AnalysisRegistry.DEFAULT_ANALYZER_NAME, analyzer); + + // if only "default" is set in the map, all getters should return the same analyzer + try (IndexAnalyzers indexAnalyzers = new IndexAnalyzers(IndexSettingsModule.newIndexSettings("index", Settings.EMPTY), analyzers, + Collections.emptyMap(), Collections.emptyMap())) { + assertSame(analyzer, indexAnalyzers.getDefaultIndexAnalyzer()); + assertSame(analyzer, indexAnalyzers.getDefaultSearchAnalyzer()); + assertSame(analyzer, indexAnalyzers.getDefaultSearchQuoteAnalyzer()); + } + + analyzers.put(AnalysisRegistry.DEFAULT_SEARCH_ANALYZER_NAME, + new NamedAnalyzer("my_search_analyzer", AnalyzerScope.INDEX, new StandardAnalyzer())); + try (IndexAnalyzers indexAnalyzers = new IndexAnalyzers(IndexSettingsModule.newIndexSettings("index", Settings.EMPTY), analyzers, + Collections.emptyMap(), Collections.emptyMap())) { + assertSame(analyzer, indexAnalyzers.getDefaultIndexAnalyzer()); + assertEquals("my_search_analyzer", indexAnalyzers.getDefaultSearchAnalyzer().name()); + assertEquals("my_search_analyzer", indexAnalyzers.getDefaultSearchQuoteAnalyzer().name()); + } + + analyzers.put(AnalysisRegistry.DEFAULT_SEARCH_QUOTED_ANALYZER_NAME, + new NamedAnalyzer("my_search_quote_analyzer", AnalyzerScope.INDEX, new StandardAnalyzer())); + try (IndexAnalyzers indexAnalyzers = new IndexAnalyzers(IndexSettingsModule.newIndexSettings("index", Settings.EMPTY), analyzers, + Collections.emptyMap(), Collections.emptyMap())) { + assertSame(analyzer, indexAnalyzers.getDefaultIndexAnalyzer()); + assertEquals("my_search_analyzer", indexAnalyzers.getDefaultSearchAnalyzer().name()); + assertEquals("my_search_quote_analyzer", indexAnalyzers.getDefaultSearchQuoteAnalyzer().name()); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index 7e216c37686..bc59c59aa54 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -40,6 +40,9 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_ANALYZER_NAME; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_ANALYZER_NAME; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_QUOTED_ANALYZER_NAME; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -57,22 +60,20 @@ public class TypeParsersTests extends ESTestCase { Mapper.TypeParser.ParserContext parserContext = mock(Mapper.TypeParser.ParserContext.class); // check AnalysisMode.ALL works - Map analyzers = new HashMap<>(); + Map analyzers = defaultAnalyzers(); analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", AnalysisMode.ALL))); - IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, - null, analyzers, Collections.emptyMap(), Collections.emptyMap()); + IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext); // check that "analyzer" set to something that only supports AnalysisMode.SEARCH_TIME or AnalysisMode.INDEX_TIME is blocked AnalysisMode mode = randomFrom(AnalysisMode.SEARCH_TIME, AnalysisMode.INDEX_TIME); - analyzers = new HashMap<>(); + analyzers = defaultAnalyzers(); analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode))); - indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, null, analyzers, - Collections.emptyMap(), Collections.emptyMap()); + indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); MapperException ex = expectThrows(MapperException.class, () -> TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext)); @@ -80,6 +81,14 @@ public class TypeParsersTests extends ESTestCase { ex.getMessage()); } + private static Map defaultAnalyzers() { + Map analyzers = new HashMap<>(); + analyzers.put(DEFAULT_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, null)); + analyzers.put(DEFAULT_SEARCH_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, null)); + analyzers.put(DEFAULT_SEARCH_QUOTED_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, null)); + return analyzers; + } + public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() { TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField"); for (String settingToTest : new String[] { "search_analyzer", "search_quote_analyzer" }) { @@ -92,25 +101,23 @@ public class TypeParsersTests extends ESTestCase { Mapper.TypeParser.ParserContext parserContext = mock(Mapper.TypeParser.ParserContext.class); // check AnalysisMode.ALL and AnalysisMode.SEARCH_TIME works - Map analyzers = new HashMap<>(); + Map analyzers = defaultAnalyzers(); AnalysisMode mode = randomFrom(AnalysisMode.ALL, AnalysisMode.SEARCH_TIME); 
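// the map is seeded via defaultAnalyzers() because the IndexAnalyzers constructor now requires a "default" entry to be present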
analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode))); analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer())); - IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, - null, analyzers, Collections.emptyMap(), Collections.emptyMap()); + IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext); // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked mode = AnalysisMode.INDEX_TIME; - analyzers = new HashMap<>(); + analyzers = defaultAnalyzers(); analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode))); analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer())); - indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, null, - analyzers, Collections.emptyMap(), Collections.emptyMap()); + indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); MapperException ex = expectThrows(MapperException.class, () -> TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext)); @@ -127,11 +134,10 @@ public class TypeParsersTests extends ESTestCase { // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked if there is no search analyzer AnalysisMode mode = AnalysisMode.INDEX_TIME; - Map analyzers = new HashMap<>(); + Map analyzers = defaultAnalyzers(); analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode))); - IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, - null, analyzers, Collections.emptyMap(), Collections.emptyMap()); + IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); MapperException ex = expectThrows(MapperException.class, () -> TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext)); @@ -140,14 +146,13 @@ public class TypeParsersTests extends ESTestCase { // check AnalysisMode.INDEX_TIME is okay if search analyzer is also set fieldNode.put("search_analyzer", "standard"); - analyzers = new HashMap<>(); + analyzers = defaultAnalyzers(); mode = randomFrom(AnalysisMode.ALL, AnalysisMode.INDEX_TIME); analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode))); analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer())); - indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, null, analyzers, - Collections.emptyMap(), Collections.emptyMap()); + indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); 
TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 7090419e9f2..ced2a7bff78 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -23,6 +23,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -60,9 +61,9 @@ public class TranslogHandler implements Engine.TranslogRecoveryRunner { } public TranslogHandler(NamedXContentRegistry xContentRegistry, IndexSettings indexSettings) { - NamedAnalyzer defaultAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()); - IndexAnalyzers indexAnalyzers = - new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, emptyMap(), emptyMap(), emptyMap()); + Map<String, NamedAnalyzer> analyzers = new HashMap<>(); + analyzers.put(AnalysisRegistry.DEFAULT_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer())); + IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, emptyMap(), emptyMap()); SimilarityService similarityService = new SimilarityService(indexSettings, null, emptyMap()); MapperRegistry mapperRegistry = new IndicesModule(emptyList()).getMapperRegistry(); mapperService = new MapperService(indexSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry, From 69824ed9084a06eba57f381d18610d3466e7c32f Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 10 May 2019 11:00:17 -0700 Subject: [PATCH 32/67] Cleanup plugin bin directories (#41907) This commit adds deletion of the bin directory to postrm cleanup. While the package's bin files are cleaned up by the package manager, plugins may have created subdirectories under bin. We already clean up plugins, but not the extra bin dirs their installation created. closes #18109 --- distribution/packages/src/common/scripts/postrm | 8 +++++++- .../org/elasticsearch/packaging/test/PackageTestCase.java | 3 +++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/distribution/packages/src/common/scripts/postrm b/distribution/packages/src/common/scripts/postrm index a3cb5e1208f..c54df43450a 100644 --- a/distribution/packages/src/common/scripts/postrm +++ b/distribution/packages/src/common/scripts/postrm @@ -8,7 +8,6 @@ # On RedHat, # $1=0 : indicates a removal # $1=1 : indicates an upgrade - REMOVE_DIRS=false REMOVE_USER_AND_GROUP=false @@ -55,6 +54,13 @@ if [ "$REMOVE_DIRS" = "true" ]; then echo " OK" fi + # plugins may have contained bin files + if [ -d /usr/share/elasticsearch/bin ]; then + echo -n "Deleting plugin bin directories..." + rm -rf /usr/share/elasticsearch/bin + echo " OK" + fi + if [ -d /var/run/elasticsearch ]; then echo -n "Deleting PID directory..."
rm -rf /var/run/elasticsearch diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java index b6e26672f47..193edd7c32f 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java @@ -170,6 +170,9 @@ public abstract class PackageTestCase extends PackagingTestCase { public void test50Remove() throws Exception { assumeThat(installation, is(notNullValue())); + // add fake bin directory as if a plugin was installed + Files.createDirectories(installation.bin.resolve("myplugin")); + remove(distribution()); // removing must stop the service From 2244697219ee0a886c160fbbb021b076fbf5b647 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 10 May 2019 11:07:29 -0700 Subject: [PATCH 33/67] Fix debian-8 update (#42056) On debian-8, apt-get update sometimes fails on one of the extra repositories. When the update fails, the repository signing keys are not refreshed, which can later cause some packages to fail to install because they cannot be verified. This commit removes the troublesome repository before we attempt to update. closes #42017 --- Vagrantfile | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 5e27e285487..1acf4fe819b 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -70,7 +70,10 @@ Vagrant.configure(2) do |config| 'debian-8'.tap do |box| config.vm.define box, define_opts do |config| config.vm.box = 'elastic/debian-8-x86_64' - deb_common config, box + deb_common config, box, extra: <<-SHELL + # this sometimes gets a bad ip, and doesn't appear to be needed + rm /etc/apt/sources.list.d/http_debian_net_debian.list + SHELL end end 'debian-9'.tap do |box| @@ -162,8 +165,8 @@ def deb_common(config, name, extra: '') s.inline = "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile" end extra_with_lintian = <<-SHELL - install lintian #{extra} + install lintian SHELL linux_common( config, From 9944fdf237262d55b0b28a65934daa125f35338f Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 10 May 2019 11:16:57 -0700 Subject: [PATCH 34/67] Don't create tempdir for cli scripts (#41913) The elasticsearch-cli helper script does not use the tempdir created by elasticsearch-env, yet the env script still creates it. This can lead to lots of temp directories being created when running cli scripts in an automated fashion. This commit moves the tempdir creation out of elasticsearch-env and into the elasticsearch start scripts, so cli scripts no longer create one.
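An illustrative sketch of the symptom (not part of the change; assumes a POSIX shell and the default elasticsearch- prefix under the system temp directory):

    # every cli invocation sourced elasticsearch-env, which created a fresh temp directory
    for i in 1 2 3; do bin/elasticsearch-plugin list >/dev/null; done
    ls -d "${TMPDIR:-/tmp}"/elasticsearch-*   # one leftover directory per run

With the fix, the loop above leaves no elasticsearch-* directories behind.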
closes #34445 --- distribution/src/bin/elasticsearch | 4 ++++ distribution/src/bin/elasticsearch-env | 4 ---- distribution/src/bin/elasticsearch-env.bat | 3 --- distribution/src/bin/elasticsearch.bat | 4 ++++ 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/distribution/src/bin/elasticsearch b/distribution/src/bin/elasticsearch index 6843607efa1..b7ed2b648b7 100755 --- a/distribution/src/bin/elasticsearch +++ b/distribution/src/bin/elasticsearch @@ -16,6 +16,10 @@ source "`dirname "$0"`"/elasticsearch-env +if [ -z "$ES_TMPDIR" ]; then + ES_TMPDIR=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.TempDirectory` +fi + ES_JVM_OPTIONS="$ES_PATH_CONF"/jvm.options JVM_OPTIONS=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JvmOptionsParser "$ES_JVM_OPTIONS"` ES_JAVA_OPTS="${JVM_OPTIONS//\$\{ES_TMPDIR\}/$ES_TMPDIR}" diff --git a/distribution/src/bin/elasticsearch-env b/distribution/src/bin/elasticsearch-env index 2a490622b34..78cb503ecef 100644 --- a/distribution/src/bin/elasticsearch-env +++ b/distribution/src/bin/elasticsearch-env @@ -84,8 +84,4 @@ ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor} ES_DISTRIBUTION_TYPE=${es.distribution.type} ES_BUNDLED_JDK=${es.bundled_jdk} -if [ -z "$ES_TMPDIR" ]; then - ES_TMPDIR=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.TempDirectory` -fi - cd "$ES_HOME" diff --git a/distribution/src/bin/elasticsearch-env.bat b/distribution/src/bin/elasticsearch-env.bat index f1cdc2fd224..8ac141986a4 100644 --- a/distribution/src/bin/elasticsearch-env.bat +++ b/distribution/src/bin/elasticsearch-env.bat @@ -64,6 +64,3 @@ if defined JAVA_OPTS ( rem check the Java version %JAVA% -cp "%ES_CLASSPATH%" "org.elasticsearch.tools.java_version_checker.JavaVersionChecker" || exit /b 1 -if not defined ES_TMPDIR ( - for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.TempDirectory"`) do set ES_TMPDIR=%%a -) diff --git a/distribution/src/bin/elasticsearch.bat b/distribution/src/bin/elasticsearch.bat index f14185ddc4a..8ef77ac4c7f 100644 --- a/distribution/src/bin/elasticsearch.bat +++ b/distribution/src/bin/elasticsearch.bat @@ -41,6 +41,10 @@ IF ERRORLEVEL 1 ( EXIT /B %ERRORLEVEL% ) +if not defined ES_TMPDIR ( + for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.TempDirectory"`) do set ES_TMPDIR=%%a +) + set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options @setlocal for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" 
^|^| echo jvm_options_parser_failed`) do set JVM_OPTIONS=%%a From 0931815355f017b25122a4a67d28a7146c25acc6 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 10 May 2019 13:22:12 -0500 Subject: [PATCH 35/67] [ML] properly nesting objects in document source (#41901) (#42077) * [ML] properly nesting objects in document source * Throw exception on agg extraction failure, cause it to fail df * throwing error to stop df if unsupported agg is found --- .../transforms/DataFrameTransformTask.java | 3 +- .../pivot/AggregationResultUtils.java | 59 +++++++++++++++++-- .../pivot/AggregationResultUtilsTests.java | 46 +++++++++++++++ .../test/data_frame/preview_transforms.yml | 12 +++- 4 files changed, 112 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 2020300a0cf..2f4945cbeec 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -41,6 +41,7 @@ import org.elasticsearch.xpack.core.scheduler.SchedulerEngine.Event; import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.dataframe.transforms.pivot.AggregationResultUtils; import java.util.Arrays; import java.util.Map; @@ -636,7 +637,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } private boolean isIrrecoverableFailure(Exception e) { - return e instanceof IndexNotFoundException; + return e instanceof IndexNotFoundException || e instanceof AggregationResultUtils.AggregationExtractionException; } synchronized void handleFailure(Exception e) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java index b17a65fc4da..8c4fa96a144 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.dataframe.transforms.pivot; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; @@ -29,7 +30,7 @@ import java.util.stream.Stream; import static org.elasticsearch.xpack.dataframe.transforms.pivot.SchemaUtil.isNumericType; -final class AggregationResultUtils { +public final class AggregationResultUtils { private static final Logger logger = LogManager.getLogger(AggregationResultUtils.class); /** @@ -77,17 +78,18 @@ final class AggregationResultUtils { // gather the `value` type, otherwise utilize `getValueAsString` so we don't lose formatted outputs. 
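// each branch below hands the extracted value to updateDocument, which interprets dots in the aggregation
// name as object structure: updateDocument(doc, "time.max", v) nests the value as {"time": {"max": v}},
// and conflicting writes (for example "foo.bar" after "foo.bar.baz") throw an AggregationExtractionException
// rather than silently overwriting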
if (isNumericType(fieldType) || (aggResultSingleValue.getValueAsString().equals(String.valueOf(aggResultSingleValue.value())))) { - document.put(aggName, aggResultSingleValue.value()); + updateDocument(document, aggName, aggResultSingleValue.value()); } else { - document.put(aggName, aggResultSingleValue.getValueAsString()); + updateDocument(document, aggName, aggResultSingleValue.getValueAsString()); } } else if (aggResult instanceof ScriptedMetric) { - document.put(aggName, ((ScriptedMetric) aggResult).aggregation()); + updateDocument(document, aggName, ((ScriptedMetric) aggResult).aggregation()); } else { // Execution should never reach this point! // Creating transforms with unsupported aggregations shall not be possible - logger.error("Dataframe Internal Error: unsupported aggregation ["+ aggResult.getName() +"], ignoring"); - assert false; + throw new AggregationExtractionException("unsupported aggregation [{}] with name [{}]", + aggResult.getType(), + aggResult.getName()); } } @@ -97,4 +99,49 @@ final class AggregationResultUtils { }); } + @SuppressWarnings("unchecked") + static void updateDocument(Map document, String fieldName, Object value) { + String[] fieldTokens = fieldName.split("\\."); + if (fieldTokens.length == 1) { + document.put(fieldName, value); + return; + } + Map internalMap = document; + for (int i = 0; i < fieldTokens.length; i++) { + String token = fieldTokens[i]; + if (i == fieldTokens.length - 1) { + if (internalMap.containsKey(token)) { + if (internalMap.get(token) instanceof Map) { + throw new AggregationExtractionException("mixed object types of nested and non-nested fields [{}]", + fieldName); + } else { + throw new AggregationExtractionException("duplicate key value pairs key [{}] old value [{}] duplicate value [{}]", + fieldName, + internalMap.get(token), + value); + } + } + internalMap.put(token, value); + } else { + if (internalMap.containsKey(token)) { + if (internalMap.get(token) instanceof Map) { + internalMap = (Map)internalMap.get(token); + } else { + throw new AggregationExtractionException("mixed object types of nested and non-nested fields [{}]", + fieldName); + } + } else { + Map newMap = new HashMap<>(); + internalMap.put(token, newMap); + internalMap = newMap; + } + } + } + } + + public static class AggregationExtractionException extends ElasticsearchException { + AggregationExtractionException(String msg, Object... 
args) { + super(msg, args); + } + } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java index 7eb42951113..1a835c9d19b 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java @@ -66,6 +66,7 @@ import java.util.Set; import java.util.stream.Collectors; import static java.util.Arrays.asList; +import static org.hamcrest.CoreMatchers.equalTo; public class AggregationResultUtilsTests extends ESTestCase { @@ -736,6 +737,51 @@ public class AggregationResultUtilsTests extends ESTestCase { assertEquals(documentIdsFirstRun, documentIdsSecondRun); } + @SuppressWarnings("unchecked") + public void testUpdateDocument() { + Map document = new HashMap<>(); + + AggregationResultUtils.updateDocument(document, "foo.bar.baz", 1000L); + AggregationResultUtils.updateDocument(document, "foo.bar.baz2", 2000L); + AggregationResultUtils.updateDocument(document, "bar.field1", 1L); + AggregationResultUtils.updateDocument(document, "metric", 10L); + + assertThat(document.get("metric"), equalTo(10L)); + + Map bar = (Map)document.get("bar"); + + assertThat(bar.get("field1"), equalTo(1L)); + + Map foo = (Map)document.get("foo"); + Map foobar = (Map)foo.get("bar"); + + assertThat(foobar.get("baz"), equalTo(1000L)); + assertThat(foobar.get("baz2"), equalTo(2000L)); + } + + public void testUpdateDocumentWithDuplicate() { + Map document = new HashMap<>(); + + AggregationResultUtils.updateDocument(document, "foo.bar.baz", 1000L); + AggregationResultUtils.AggregationExtractionException exception = + expectThrows(AggregationResultUtils.AggregationExtractionException.class, + () -> AggregationResultUtils.updateDocument(document, "foo.bar.baz", 2000L)); + assertThat(exception.getMessage(), + equalTo("duplicate key value pairs key [foo.bar.baz] old value [1000] duplicate value [2000]")); + } + + public void testUpdateDocumentWithObjectAndNotObject() { + Map document = new HashMap<>(); + + AggregationResultUtils.updateDocument(document, "foo.bar.baz", 1000L); + AggregationResultUtils.AggregationExtractionException exception = + expectThrows(AggregationResultUtils.AggregationExtractionException.class, + () -> AggregationResultUtils.updateDocument(document, "foo.bar", 2000L)); + assertThat(exception.getMessage(), + equalTo("mixed object types of nested and non-nested fields [foo.bar]")); + } + + private void executeTest(GroupConfig groups, Collection aggregationBuilders, Collection pipelineAggregationBuilders, diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 94388784177..690383ce96e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -76,18 +76,28 @@ setup: "group_by": { "airline": {"terms": {"field": "airline"}}, "by-hour": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, - "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + "aggs": { + "avg_response": {"avg": {"field": "responsetime"}}, + 
"time.max": {"max": {"field": "time"}}, + "time.min": {"min": {"field": "time"}} + } } } - match: { preview.0.airline: foo } - match: { preview.0.by-hour: "2017-02-49 00" } - match: { preview.0.avg_response: 1.0 } + - match: { preview.0.time.max: "2017-02-18T00:30:00.000Z" } + - match: { preview.0.time.min: "2017-02-18T00:00:00.000Z" } - match: { preview.1.airline: bar } - match: { preview.1.by-hour: "2017-02-49 01" } - match: { preview.1.avg_response: 42.0 } + - match: { preview.1.time.max: "2017-02-18T01:00:00.000Z" } + - match: { preview.1.time.min: "2017-02-18T01:00:00.000Z" } - match: { preview.2.airline: foo } - match: { preview.2.by-hour: "2017-02-49 01" } - match: { preview.2.avg_response: 42.0 } + - match: { preview.2.time.max: "2017-02-18T01:01:00.000Z" } + - match: { preview.2.time.min: "2017-02-18T01:01:00.000Z" } --- "Test preview transform with invalid config": From febee07dccaf3a0776953bc8d028a8b2301b6254 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 10 May 2019 13:22:31 -0500 Subject: [PATCH 36/67] [ML] adding pivot.max_search_page_size option for setting paging size (#41920) (#42079) * [ML] adding pivot.size option for setting paging size * Changing field name to address PR comments * fixing ctor usage * adjust hlrc for field name change --- .../transforms/pivot/PivotConfig.java | 38 ++++++++++++++++--- .../transforms/pivot/PivotConfigTests.java | 4 +- .../DataFrameTransformDocumentationIT.java | 5 ++- .../dataframe/put_data_frame.asciidoc | 5 +++ .../xpack/core/dataframe/DataFrameField.java | 1 + .../action/PutDataFrameTransformAction.java | 8 ++++ .../transforms/pivot/PivotConfig.java | 24 ++++++++++-- .../transforms/pivot/PivotConfigTests.java | 8 +++- .../integration/DataFrameIntegTestCase.java | 8 +++- .../DataFrameTransformProgressIT.java | 4 +- .../dataframe/transforms/pivot/Pivot.java | 8 ++-- .../transforms/DataFrameIndexerTests.java | 29 +++++++++----- .../transforms/pivot/PivotTests.java | 14 ++++++- .../test/data_frame/transforms_crud.yml | 30 +++++++++++++++ 14 files changed, 154 insertions(+), 32 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java index 0c3a6e3ea89..6fdbeb8a43a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java @@ -39,25 +39,29 @@ public class PivotConfig implements ToXContentObject { private static final ParseField GROUP_BY = new ParseField("group_by"); private static final ParseField AGGREGATIONS = new ParseField("aggregations"); + private static final ParseField MAX_PAGE_SEARCH_SIZE = new ParseField("max_page_search_size"); private final GroupConfig groups; private final AggregationConfig aggregationConfig; + private final Integer maxPageSearchSize; private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("pivot_config", true, - args -> new PivotConfig((GroupConfig) args[0], (AggregationConfig) args[1])); + args -> new PivotConfig((GroupConfig) args[0], (AggregationConfig) args[1], (Integer) args[2])); static { PARSER.declareObject(constructorArg(), (p, c) -> (GroupConfig.fromXContent(p)), GROUP_BY); PARSER.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p), AGGREGATIONS); + 
PARSER.declareInt(optionalConstructorArg(), MAX_PAGE_SEARCH_SIZE); } public static PivotConfig fromXContent(final XContentParser parser) { return PARSER.apply(parser, null); } - PivotConfig(GroupConfig groups, final AggregationConfig aggregationConfig) { + PivotConfig(GroupConfig groups, final AggregationConfig aggregationConfig, Integer maxPageSearchSize) { this.groups = groups; this.aggregationConfig = aggregationConfig; + this.maxPageSearchSize = maxPageSearchSize; } @Override @@ -65,6 +69,9 @@ public class PivotConfig implements ToXContentObject { builder.startObject(); builder.field(GROUP_BY.getPreferredName(), groups); builder.field(AGGREGATIONS.getPreferredName(), aggregationConfig); + if (maxPageSearchSize != null) { + builder.field(MAX_PAGE_SEARCH_SIZE.getPreferredName(), maxPageSearchSize); + } builder.endObject(); return builder; } @@ -77,6 +84,10 @@ public class PivotConfig implements ToXContentObject { return groups; } + public Integer getMaxPageSearchSize() { + return maxPageSearchSize; + } + @Override public boolean equals(Object other) { if (this == other) { @@ -89,12 +100,14 @@ public class PivotConfig implements ToXContentObject { final PivotConfig that = (PivotConfig) other; - return Objects.equals(this.groups, that.groups) && Objects.equals(this.aggregationConfig, that.aggregationConfig); + return Objects.equals(this.groups, that.groups) + && Objects.equals(this.aggregationConfig, that.aggregationConfig) + && Objects.equals(this.maxPageSearchSize, that.maxPageSearchSize); } @Override public int hashCode() { - return Objects.hash(groups, aggregationConfig); + return Objects.hash(groups, aggregationConfig, maxPageSearchSize); } public static Builder builder() { @@ -104,6 +117,7 @@ public class PivotConfig implements ToXContentObject { public static class Builder { private GroupConfig groups; private AggregationConfig aggregationConfig; + private Integer maxPageSearchSize; /** * Set how to group the source data @@ -135,8 +149,22 @@ public class PivotConfig implements ToXContentObject { return this; } + /** + * Sets the maximum paging size (maxPageSearchSize) that the data frame transform can use when + * pulling data from the source index. + * + * If an OOM is triggered, the paging size is dynamically reduced so that the transform can continue to gather data. + * + * @param maxPageSearchSize Integer value between 10 and 10_000 + * @return the {@link Builder} with the maximum paging size set.
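+ *
+ * For example, mirroring the documentation test below: {@code PivotConfig.builder().setGroups(groupConfig).setAggregationConfig(aggConfig).setMaxPageSearchSize(1000).build()}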
+ */ + public Builder setMaxPageSearchSize(Integer maxPageSearchSize) { + this.maxPageSearchSize = maxPageSearchSize; + return this; + } + public PivotConfig build() { - return new PivotConfig(groups, aggregationConfig); + return new PivotConfig(groups, aggregationConfig, maxPageSearchSize); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java index d2e036d9f1a..5cafcb9f419 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java @@ -32,7 +32,9 @@ import java.util.function.Predicate; public class PivotConfigTests extends AbstractXContentTestCase { public static PivotConfig randomPivotConfig() { - return new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomAggregationConfig()); + return new PivotConfig(GroupConfigTests.randomGroupConfig(), + AggregationConfigTests.randomAggregationConfig(), + randomBoolean() ? null : randomIntBetween(10, 10_000)); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 3c5059279b4..4bd78f12ae4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -137,8 +137,9 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // end::put-data-frame-transform-agg-config // tag::put-data-frame-transform-pivot-config PivotConfig pivotConfig = PivotConfig.builder() - .setGroups(groupConfig) - .setAggregationConfig(aggConfig) + .setGroups(groupConfig) // <1> + .setAggregationConfig(aggConfig) // <2> + .setMaxPageSearchSize(1000) // <3> .build(); // end::put-data-frame-transform-pivot-config // tag::put-data-frame-transform-config diff --git a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc index bb1b20aaa1a..567449c9c25 100644 --- a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc @@ -66,6 +66,11 @@ Defines the pivot function `group by` fields and the aggregation to reduce the d -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-pivot-config] -------------------------------------------------- +<1> The `GroupConfig` to use in the pivot +<2> The aggregations to use +<3> The maximum paging size for the transform when pulling data +from the source. The size dynamically adjusts as the transform +is running to recover from and prevent OOM issues. ===== GroupConfig The grouping terms. 
Defines the group by and destination fields diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java index 71bf14cdeb4..c61ed2ddde8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java @@ -27,6 +27,7 @@ public final class DataFrameField { public static final ParseField SOURCE = new ParseField("source"); public static final ParseField DESTINATION = new ParseField("dest"); public static final ParseField FORCE = new ParseField("force"); + public static final ParseField MAX_PAGE_SEARCH_SIZE = new ParseField("max_page_search_size"); /** * Fields for checkpointing diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java index 059bad3494c..2608fb87761 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java @@ -56,6 +56,14 @@ public class PutDataFrameTransformAction extends Action { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; + if(config.getPivotConfig() != null + && config.getPivotConfig().getMaxPageSearchSize() != null + && (config.getPivotConfig().getMaxPageSearchSize() < 10 || config.getPivotConfig().getMaxPageSearchSize() > 10_000)) { + validationException = addValidationError( + "pivot.max_page_search_size [" + + config.getPivotConfig().getMaxPageSearchSize() + "] must be greater than 10 and less than 10,000", + validationException); + } for(String failure : config.getPivotConfig().aggFieldValidation()) { validationException = addValidationError(failure, validationException); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java index 79a0a7fc1bf..ab2f7d489ac 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -35,6 +36,7 @@ public class PivotConfig implements Writeable, ToXContentObject { private static final String NAME = "data_frame_transform_pivot"; private final GroupConfig groups; private final AggregationConfig aggregationConfig; + private final Integer maxPageSearchSize; private static final ConstructingObjectParser STRICT_PARSER = createParser(false); private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); @@ -61,7 +63,7 @@ public class PivotConfig implements Writeable, ToXContentObject { throw new IllegalArgumentException("Required [aggregations]"); } - return new PivotConfig(groups, 
aggregationConfig); + return new PivotConfig(groups, aggregationConfig, (Integer)args[3]); }); parser.declareObject(constructorArg(), @@ -69,18 +71,21 @@ public class PivotConfig implements Writeable, ToXContentObject { parser.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p, lenient), DataFrameField.AGGREGATIONS); parser.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p, lenient), DataFrameField.AGGS); + parser.declareInt(optionalConstructorArg(), DataFrameField.MAX_PAGE_SEARCH_SIZE); return parser; } - public PivotConfig(final GroupConfig groups, final AggregationConfig aggregationConfig) { + public PivotConfig(final GroupConfig groups, final AggregationConfig aggregationConfig, Integer maxPageSearchSize) { this.groups = ExceptionsHelper.requireNonNull(groups, DataFrameField.GROUP_BY.getPreferredName()); this.aggregationConfig = ExceptionsHelper.requireNonNull(aggregationConfig, DataFrameField.AGGREGATIONS.getPreferredName()); + this.maxPageSearchSize = maxPageSearchSize; } public PivotConfig(StreamInput in) throws IOException { this.groups = new GroupConfig(in); this.aggregationConfig = new AggregationConfig(in); + this.maxPageSearchSize = in.readOptionalInt(); } @Override @@ -88,6 +93,9 @@ public class PivotConfig implements Writeable, ToXContentObject { builder.startObject(); builder.field(DataFrameField.GROUP_BY.getPreferredName(), groups); builder.field(DataFrameField.AGGREGATIONS.getPreferredName(), aggregationConfig); + if (maxPageSearchSize != null) { + builder.field(DataFrameField.MAX_PAGE_SEARCH_SIZE.getPreferredName(), maxPageSearchSize); + } builder.endObject(); return builder; } @@ -113,6 +121,7 @@ public class PivotConfig implements Writeable, ToXContentObject { public void writeTo(StreamOutput out) throws IOException { groups.writeTo(out); aggregationConfig.writeTo(out); + out.writeOptionalInt(maxPageSearchSize); } public AggregationConfig getAggregationConfig() { @@ -123,6 +132,11 @@ public class PivotConfig implements Writeable, ToXContentObject { return groups; } + @Nullable + public Integer getMaxPageSearchSize() { + return maxPageSearchSize; + } + @Override public boolean equals(Object other) { if (this == other) { @@ -135,12 +149,14 @@ public class PivotConfig implements Writeable, ToXContentObject { final PivotConfig that = (PivotConfig) other; - return Objects.equals(this.groups, that.groups) && Objects.equals(this.aggregationConfig, that.aggregationConfig); + return Objects.equals(this.groups, that.groups) + && Objects.equals(this.aggregationConfig, that.aggregationConfig) + && Objects.equals(this.maxPageSearchSize, that.maxPageSearchSize); } @Override public int hashCode() { - return Objects.hash(groups, aggregationConfig); + return Objects.hash(groups, aggregationConfig, maxPageSearchSize); } public boolean isValid() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java index 342e007f212..2f93f50d4d1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java @@ -24,11 +24,15 @@ import static org.hamcrest.Matchers.empty; public class PivotConfigTests extends AbstractSerializingDataFrameTestCase { public static PivotConfig 
randomPivotConfig() { - return new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomAggregationConfig()); + return new PivotConfig(GroupConfigTests.randomGroupConfig(), + AggregationConfigTests.randomAggregationConfig(), + randomBoolean() ? null : randomIntBetween(10, 10_000)); } public static PivotConfig randomInvalidPivotConfig() { - return new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomInvalidAggregationConfig()); + return new PivotConfig(GroupConfigTests.randomGroupConfig(), + AggregationConfigTests.randomInvalidAggregationConfig(), + randomBoolean() ? null : randomIntBetween(10, 10_000)); } @Override diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java index 84f3e05de5c..2dd116dfd66 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java @@ -172,7 +172,13 @@ abstract class DataFrameIntegTestCase extends ESIntegTestCase { protected PivotConfig createPivotConfig(Map groups, AggregatorFactories.Builder aggregations) throws Exception { - return new PivotConfig(createGroupConfig(groups), createAggConfig(aggregations)); + return createPivotConfig(groups, aggregations, null); + } + + protected PivotConfig createPivotConfig(Map groups, + AggregatorFactories.Builder aggregations, + Integer size) throws Exception { + return new PivotConfig(createGroupConfig(groups), createAggConfig(aggregations), size); } protected DataFrameTransformConfig createTransformConfig(String id, diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java index d338d6949f0..194d35e8ba6 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java @@ -130,7 +130,7 @@ public class DataFrameTransformProgressIT extends ESIntegTestCase { AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); aggs.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); AggregationConfig aggregationConfig = new AggregationConfig(Collections.emptyMap(), aggs); - PivotConfig pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig); + PivotConfig pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig, null); DataFrameTransformConfig config = new DataFrameTransformConfig("get_progress_transform", sourceConfig, destConfig, @@ -149,7 +149,7 @@ public class DataFrameTransformProgressIT extends ESIntegTestCase { QueryConfig queryConfig = new QueryConfig(Collections.emptyMap(), QueryBuilders.termQuery("user_id", "user_26")); - pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig); + pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig, null); sourceConfig = new SourceConfig(new 
String[]{REVIEWS_INDEX_NAME}, queryConfig); config = new DataFrameTransformConfig("get_progress_transform", sourceConfig, diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java index 0e5231442d1..8205f2576da 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java @@ -76,13 +76,15 @@ public class Pivot { * the page size, the type of aggregations and the data. As the page size is the number of buckets we return * per page the page size is a multiplier for the costs of aggregating bucket. * - * Initially this returns a default, in future it might inspect the configuration and base the initial size - * on the aggregations used. + * The user may set a maximum in the {@link PivotConfig#getMaxPageSearchSize()}, but if that is not provided, + * the default {@link Pivot#DEFAULT_INITIAL_PAGE_SIZE} is used. + * + * In future we might inspect the configuration and base the initial size on the aggregations used. * * @return the page size */ public int getInitialPageSize() { - return DEFAULT_INITIAL_PAGE_SIZE; + return config.getMaxPageSearchSize() == null ? DEFAULT_INITIAL_PAGE_SIZE : config.getMaxPageSearchSize(); } public SearchRequest buildSearchRequest(SourceConfig sourceConfig, Map position, int pageSize) { diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java index f3f3255f07a..43198c6edfc 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java @@ -23,7 +23,9 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.AggregationConfigTests; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfigTests; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; @@ -39,7 +41,10 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Function; +import static org.elasticsearch.xpack.core.dataframe.transforms.DestConfigTests.randomDestConfig; +import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomSourceConfig; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -169,9 +174,15 @@ public class DataFrameIndexerTests extends ESTestCase { } public void testPageSizeAdapt() throws InterruptedException { - 
DataFrameTransformConfig config = DataFrameTransformConfigTests.randomDataFrameTransformConfig(); + Integer pageSize = randomBoolean() ? null : randomIntBetween(500, 10_000); + DataFrameTransformConfig config = new DataFrameTransformConfig(randomAlphaOfLength(10), + randomSourceConfig(), + randomDestConfig(), + null, + new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomAggregationConfig(), pageSize), + randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000)); AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); - + final long initialPageSize = pageSize == null ? Pivot.DEFAULT_INITIAL_PAGE_SIZE : pageSize; Function searchFunction = searchRequest -> { throw new SearchPhaseExecutionException("query", "Partial shards failure", new ShardSearchFailure[] { new ShardSearchFailure(new CircuitBreakingException("to much memory", 110, 100, Durability.TRANSIENT)) }); @@ -179,9 +190,7 @@ public class DataFrameIndexerTests extends ESTestCase { Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); - Consumer failureConsumer = e -> { - fail("expected circuit breaker exception to be handled"); - }; + Consumer failureConsumer = e -> fail("expected circuit breaker exception to be handled"); final ExecutorService executor = Executors.newFixedThreadPool(1); try { @@ -197,8 +206,8 @@ public class DataFrameIndexerTests extends ESTestCase { latch.countDown(); awaitBusy(() -> indexer.getState() == IndexerState.STOPPED); long pageSizeAfterFirstReduction = indexer.getPageSize(); - assertTrue(Pivot.DEFAULT_INITIAL_PAGE_SIZE > pageSizeAfterFirstReduction); - assertTrue(pageSizeAfterFirstReduction > DataFrameIndexer.MINIMUM_PAGE_SIZE); + assertThat(initialPageSize, greaterThan(pageSizeAfterFirstReduction)); + assertThat(pageSizeAfterFirstReduction, greaterThan((long)DataFrameIndexer.MINIMUM_PAGE_SIZE)); // run indexer a 2nd time final CountDownLatch secondRunLatch = indexer.newLatch(1); @@ -211,8 +220,8 @@ public class DataFrameIndexerTests extends ESTestCase { awaitBusy(() -> indexer.getState() == IndexerState.STOPPED); // assert that page size has been reduced again - assertTrue(pageSizeAfterFirstReduction > indexer.getPageSize()); - assertTrue(pageSizeAfterFirstReduction > DataFrameIndexer.MINIMUM_PAGE_SIZE); + assertThat(pageSizeAfterFirstReduction, greaterThan((long)indexer.getPageSize())); + assertThat(pageSizeAfterFirstReduction, greaterThan((long)DataFrameIndexer.MINIMUM_PAGE_SIZE)); } finally { executor.shutdownNow(); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java index 172868833f3..5c1c61a5265 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java @@ -97,6 +97,16 @@ public class PivotTests extends ESTestCase { assertInvalidTransform(client, source, pivot); } + public void testInitialPageSize() throws Exception { + int expectedPageSize = 1000; + + Pivot pivot = new Pivot(new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig(), expectedPageSize)); + assertThat(pivot.getInitialPageSize(), equalTo(expectedPageSize)); + + pivot = new Pivot(new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig(), null)); + 
assertThat(pivot.getInitialPageSize(), equalTo(Pivot.DEFAULT_INITIAL_PAGE_SIZE)); + } + public void testSearchFailure() throws Exception { // test a failure during the search operation, transform creation fails if // search has failures although they might just be temporary @@ -167,11 +177,11 @@ } private PivotConfig getValidPivotConfig() throws IOException { - return new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig()); + return new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig(), null); } private PivotConfig getValidPivotConfig(AggregationConfig aggregationConfig) throws IOException { - return new PivotConfig(GroupConfigTests.randomGroupConfig(), aggregationConfig); + return new PivotConfig(GroupConfigTests.randomGroupConfig(), aggregationConfig, null); } private AggregationConfig getValidAggregationConfig() throws IOException { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml index 40af091a91b..65945b6ab74 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml @@ -303,6 +303,36 @@ setup: } } --- +"Test put config with invalid pivot size": + - do: + catch: /pivot\.max_page_search_size \[5\] must be greater than 10 and less than 10,000/ + data_frame.put_data_frame_transform: + transform_id: "airline-transform" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-dest-index" }, + "pivot": { + "max_page_search_size": 5, + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + - do: + catch: /pivot\.max_page_search_size \[15000\] must be greater than 10 and less than 10,000/ + data_frame.put_data_frame_transform: + transform_id: "airline-transform" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-dest-index" }, + "pivot": { + "max_page_search_size": 15000, + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } +--- "Test creation failures due to duplicate and conflicting field names": - do: catch: /duplicate field \[airline\] detected/ From c19ea0a6f18a3d2bf9c06f9d7e6efee1779686fb Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 10 May 2019 14:27:16 -0400 Subject: [PATCH 37/67] Remove global checkpoint assertion in peer recovery (#41987) If remote recovery copies an index commit which has gaps in sequence numbers to a follower, then these assertions (introduced in #40823) don't hold for follower replicas.
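To illustrate with hypothetical numbers: if the copied commit contains operations with sequence numbers {0..4, 8..10}, its max_seq_no is 10, but the follower can only advance its global checkpoint to 4 until the gap at 5..7 is filled, so an assertion of the form `globalCheckpoint >= maxSeqNo` cannot hold at that point in the recovery.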
Closes #41037 --- .../org/elasticsearch/indices/recovery/RecoveryTarget.java | 7 ------- 1 file changed, 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index d0e548aa6b2..83ad1dc80c5 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -39,7 +39,6 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLeases; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardNotRecoveringException; import org.elasticsearch.index.shard.IndexShardState; @@ -288,9 +287,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget ActionListener.completeWith(listener, () -> { state().getTranslog().totalOperations(totalTranslogOps); indexShard().openEngineAndSkipTranslogRecovery(); - assert indexShard.getGlobalCheckpoint() >= indexShard.seqNoStats().getMaxSeqNo() || - indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_2_0) - : "global checkpoint is not initialized [" + indexShard.seqNoStats() + "]"; return null; }); } @@ -399,9 +395,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget if (indexShard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1)) { store.ensureIndexHasHistoryUUID(); } - assert globalCheckpoint >= Long.parseLong(sourceMetaData.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO)) - || indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_2_0) : - "invalid global checkpoint[" + globalCheckpoint + "] source_meta_data [" + sourceMetaData.getCommitUserData() + "]"; final String translogUUID = Translog.createEmptyTranslog( indexShard.shardPath().resolveTranslog(), globalCheckpoint, shardId, indexShard.getPendingPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); From a85189a55864df9337eab1d9d58b44692424a5f9 Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Fri, 10 May 2019 16:06:42 -0600 Subject: [PATCH 38/67] Remove toStepKeys from LifecycleAction (#41775) The `toStepKeys()` method was only called in its own test case. The real list of StepKeys that's used in action execution is generated from the list of actual step objects returned by `toSteps()`. This commit removes that method. 
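As a sketch of the equivalent derivation (a fragment mirroring the removed test helper below, assuming the usual `java.util` and `java.util.stream` imports), any caller that needs the keys can compute them from the steps themselves:

```java
// The keys of the concrete steps are authoritative, which is what made a
// separate toStepKeys(phase) implementation on every action redundant.
List<Step> steps = action.toSteps(client, phase, nextStepKey);
List<Step.StepKey> stepKeys = steps.stream().map(Step::getKey).collect(Collectors.toList());
```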
--- .../core/indexlifecycle/AllocateAction.java | 7 ------- .../xpack/core/indexlifecycle/DeleteAction.java | 9 --------- .../core/indexlifecycle/ForceMergeAction.java | 8 -------- .../xpack/core/indexlifecycle/FreezeAction.java | 6 ------ .../core/indexlifecycle/LifecycleAction.java | 10 ---------- .../core/indexlifecycle/ReadOnlyAction.java | 6 ------ .../core/indexlifecycle/RolloverAction.java | 9 --------- .../core/indexlifecycle/SetPriorityAction.java | 5 ----- .../xpack/core/indexlifecycle/ShrinkAction.java | 16 ---------------- .../core/indexlifecycle/UnfollowAction.java | 13 ------------- .../indexlifecycle/AbstractActionTestCase.java | 16 ---------------- .../xpack/core/indexlifecycle/MockAction.java | 8 +------- 12 files changed, 1 insertion(+), 112 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java index 7843fa7d86e..cbfec7b9102 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java @@ -149,13 +149,6 @@ public class AllocateAction implements LifecycleAction { return Arrays.asList(allocateStep, routedCheckStep); } - @Override - public List toStepKeys(String phase) { - StepKey allocateKey = new StepKey(phase, NAME, NAME); - StepKey allocationRoutedKey = new StepKey(phase, NAME, AllocationRoutedStep.NAME); - return Arrays.asList(allocateKey, allocationRoutedKey); - } - @Override public int hashCode() { return Objects.hash(numberOfReplicas, include, exclude, require); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java index b61534e4970..ae8eaef6709 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import java.io.IOException; import java.util.Arrays; @@ -67,14 +66,6 @@ public class DeleteAction implements LifecycleAction { return Arrays.asList(waitForNoFollowersStep, deleteStep); } - @Override - public List toStepKeys(String phase) { - Step.StepKey waitForNoFollowerStepKey = new Step.StepKey(phase, NAME, WaitForNoFollowersStep.NAME); - Step.StepKey deleteStepKey = new Step.StepKey(phase, NAME, DeleteStep.NAME); - - return Arrays.asList(waitForNoFollowerStepKey, deleteStepKey); - } - @Override public int hashCode() { return 1; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java index 2c4508a8355..ace29f6b465 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java @@ -98,14 +98,6 @@ public class ForceMergeAction implements 
LifecycleAction { return Arrays.asList(readOnlyStep, forceMergeStep, segmentCountStep); } - @Override - public List toStepKeys(String phase) { - StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME); - StepKey forceMergeKey = new StepKey(phase, NAME, ForceMergeStep.NAME); - StepKey countKey = new StepKey(phase, NAME, SegmentCountStep.NAME); - return Arrays.asList(readOnlyKey, forceMergeKey, countKey); - } - @Override public int hashCode() { return Objects.hash(maxNumSegments); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeAction.java index 63dbedadd4f..7cffaed8091 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeAction.java @@ -64,12 +64,6 @@ public class FreezeAction implements LifecycleAction { return Arrays.asList(freezeStep); } - @Override - public List toStepKeys(String phase) { - StepKey freezeStepKey = new StepKey(phase, NAME, FreezeStep.NAME); - return Arrays.asList(freezeStepKey); - } - @Override public int hashCode() { return 1; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java index 3e84813274d..d6ef78496bb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import java.util.List; @@ -30,15 +29,6 @@ public interface LifecycleAction extends ToXContentObject, NamedWriteable { */ List toSteps(Client client, String phase, @Nullable Step.StepKey nextStepKey); - /** - * - * @param phase - * the name of the phase this action is being executed within - * @return the {@link StepKey}s for the steps which will be executed in this - * action - */ - List toStepKeys(String phase); - /** * @return true if this action is considered safe. 
An action is not safe if * it will produce unwanted side effects or will get stuck when the diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java index e338d75a98f..ea348865080 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import java.io.IOException; import java.util.Collections; @@ -65,11 +64,6 @@ public class ReadOnlyAction implements LifecycleAction { Settings readOnlySettings = Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true).build(); return Collections.singletonList(new UpdateSettingsStep(key, nextStepKey, client, readOnlySettings)); } - - @Override - public List toStepKeys(String phase) { - return Collections.singletonList(new Step.StepKey(phase, NAME, NAME)); - } @Override public int hashCode() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java index 25346fefa31..280f8561c35 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java @@ -151,15 +151,6 @@ public class RolloverAction implements LifecycleAction { return Arrays.asList(waitForRolloverReadyStep, rolloverStep, updateDateStep, setIndexingCompleteStep); } - @Override - public List toStepKeys(String phase) { - StepKey rolloverReadyStepKey = new StepKey(phase, NAME, WaitForRolloverReadyStep.NAME); - StepKey rolloverStepKey = new StepKey(phase, NAME, RolloverStep.NAME); - StepKey updateDateStepKey = new StepKey(phase, NAME, UpdateRolloverLifecycleDateStep.NAME); - StepKey setIndexingCompleteStepKey = new StepKey(phase, NAME, INDEXING_COMPLETE_STEP_NAME); - return Arrays.asList(rolloverReadyStepKey, rolloverStepKey, updateDateStepKey, setIndexingCompleteStepKey); - } - @Override public int hashCode() { return Objects.hash(maxSize, maxAge, maxDocs); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityAction.java index 507da4613e2..1bc09e7f42e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityAction.java @@ -90,11 +90,6 @@ public class SetPriorityAction implements LifecycleAction { return Collections.singletonList(new UpdateSettingsStep(key, nextStepKey, client, indexPriority)); } - @Override - public List toStepKeys(String phase) { - return Collections.singletonList(new StepKey(phase, NAME, NAME)); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java index c1b3fb24229..5fc4b06ec57 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java @@ -111,22 +111,6 @@ public class ShrinkAction implements LifecycleAction { shrink, allocated, copyMetadata, aliasSwapAndDelete, waitOnShrinkTakeover); } - @Override - public List toStepKeys(String phase) { - StepKey conditionalSkipKey = new StepKey(phase, NAME, BranchingStep.NAME); - StepKey waitForNoFollowerStepKey = new StepKey(phase, NAME, WaitForNoFollowersStep.NAME); - StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME); - StepKey setSingleNodeKey = new StepKey(phase, NAME, SetSingleNodeAllocateStep.NAME); - StepKey checkShrinkReadyKey = new StepKey(phase, NAME, CheckShrinkReadyStep.NAME); - StepKey shrinkKey = new StepKey(phase, NAME, ShrinkStep.NAME); - StepKey enoughShardsKey = new StepKey(phase, NAME, ShrunkShardsAllocatedStep.NAME); - StepKey copyMetadataKey = new StepKey(phase, NAME, CopyExecutionStateStep.NAME); - StepKey aliasKey = new StepKey(phase, NAME, ShrinkSetAliasStep.NAME); - StepKey isShrunkIndexKey = new StepKey(phase, NAME, ShrunkenIndexCheckStep.NAME); - return Arrays.asList(conditionalSkipKey, waitForNoFollowerStepKey, readOnlyKey, setSingleNodeKey, checkShrinkReadyKey, shrinkKey, - enoughShardsKey, copyMetadataKey, aliasKey, isShrunkIndexKey); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowAction.java index 20a0fb75b9d..d8d855b0af2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowAction.java @@ -54,19 +54,6 @@ public final class UnfollowAction implements LifecycleAction { return Arrays.asList(step1, step2, step3, step4, step5, step6, step7); } - @Override - public List toStepKeys(String phase) { - StepKey indexingCompleteStep = new StepKey(phase, NAME, WaitForIndexingCompleteStep.NAME); - StepKey waitForFollowShardTasksStep = new StepKey(phase, NAME, WaitForFollowShardTasksStep.NAME); - StepKey pauseFollowerIndexStep = new StepKey(phase, NAME, PauseFollowerIndexStep.NAME); - StepKey closeFollowerIndexStep = new StepKey(phase, NAME, CloseFollowerIndexStep.NAME); - StepKey unfollowIndexStep = new StepKey(phase, NAME, UnfollowFollowIndexStep.NAME); - StepKey openFollowerIndexStep = new StepKey(phase, NAME, OpenFollowerIndexStep.NAME); - StepKey waitForYellowStep = new StepKey(phase, NAME, WaitForYellowStep.NAME); - return Arrays.asList(indexingCompleteStep, waitForFollowShardTasksStep, pauseFollowerIndexStep, - closeFollowerIndexStep, unfollowIndexStep, openFollowerIndexStep, waitForYellowStep); - } - @Override public boolean isSafeAction() { // There are no settings to change, so therefor this action should be safe: diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java index bed04a7cf54..ab35221afec 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java @@ -7,10 +7,6 @@ package org.elasticsearch.xpack.core.indexlifecycle; import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; - -import java.util.List; -import java.util.stream.Collectors; public abstract class AbstractActionTestCase extends AbstractSerializingTestCase { @@ -25,16 +21,4 @@ public abstract class AbstractActionTestCase extends assertEquals(isSafeAction(), action.isSafeAction()); } - public void testToStepKeys() { - T action = createTestInstance(); - String phase = randomAlphaOfLengthBetween(1, 10); - StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), - randomAlphaOfLengthBetween(1, 10)); - List steps = action.toSteps(null, phase, nextStepKey); - assertNotNull(steps); - List stepKeys = action.toStepKeys(phase); - assertNotNull(stepKeys); - List expectedStepKeys = steps.stream().map(Step::getKey).collect(Collectors.toList()); - assertEquals(expectedStepKeys, stepKeys); - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java index 30eabac5626..9ca6dc17d3e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import java.io.IOException; import java.util.ArrayList; @@ -75,11 +74,6 @@ public class MockAction implements LifecycleAction { return new ArrayList<>(steps); } - @Override - public List toStepKeys(String phase) { - return steps.stream().map(Step::getKey).collect(Collectors.toList()); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeList(steps.stream().map(MockStep::new).collect(Collectors.toList())); @@ -103,4 +97,4 @@ public class MockAction implements LifecycleAction { return Objects.equals(steps, other.steps) && Objects.equals(safe, other.safe); } -} \ No newline at end of file +} From 912c6bdbfff805c5c93f22c4be41708cae76a031 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Sat, 11 May 2019 01:58:03 +0300 Subject: [PATCH 39/67] Prevent order being lost for _nodes API filters (#42045) (#42089) * Switch to using a list instead of a Set for the filters, so that the order of these filters is kept. 
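A minimal standalone sketch of the underlying problem (class and variable names are illustrative, not part of this change):

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class FilterOrderDemo {
    public static void main(String[] args) {
        String param = "_all,master:false,_all";
        // A HashSet deduplicates and makes no ordering guarantee...
        Set<String> asSet = new HashSet<>(Arrays.asList(param.split(",")));
        System.out.println(asSet); // e.g. [master:false, _all]
        // ...while a plain array keeps both the duplicates and the order.
        System.out.println(Arrays.toString(param.split(","))); // [_all, master:false, _all]
    }
}
```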
(cherry picked from commit 74a743829799b64971e0ac5ae265f43f6c14e074) --- .../admin/cluster/RestNodesInfoAction.java | 21 ++- .../cluster/RestNodesInfoActionTests.java | 143 ++++++++++++++++++ 2 files changed, 156 insertions(+), 8 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoActionTests.java diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java index bacc698b2a4..20370b27d43 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java @@ -36,7 +36,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestNodesInfoAction extends BaseRestHandler { - private static final Set ALLOWED_METRICS = Sets.newHashSet( + static final Set ALLOWED_METRICS = Sets.newHashSet( "http", "ingest", "indices", @@ -69,6 +69,13 @@ public class RestNodesInfoAction extends BaseRestHandler { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final NodesInfoRequest nodesInfoRequest = prepareRequest(request); + settingsFilter.addFilterSettingParams(request); + + return channel -> client.admin().cluster().nodesInfo(nodesInfoRequest, new NodesResponseRestListener<>(channel)); + } + + static NodesInfoRequest prepareRequest(final RestRequest request) { String[] nodeIds; Set metrics; @@ -76,17 +83,18 @@ public class RestNodesInfoAction extends BaseRestHandler { // still, /_nodes/_local (or any other node id) should work and be treated as usual // this means one must differentiate between allowed metrics and arbitrary node ids in the same place if (request.hasParam("nodeId") && !request.hasParam("metrics")) { - Set metricsOrNodeIds = Strings.tokenizeByCommaToSet(request.param("nodeId", "_all")); + String nodeId = request.param("nodeId", "_all"); + Set metricsOrNodeIds = Strings.tokenizeByCommaToSet(nodeId); boolean isMetricsOnly = ALLOWED_METRICS.containsAll(metricsOrNodeIds); if (isMetricsOnly) { nodeIds = new String[]{"_all"}; metrics = metricsOrNodeIds; } else { - nodeIds = metricsOrNodeIds.toArray(new String[]{}); + nodeIds = Strings.tokenizeToStringArray(nodeId, ","); metrics = Sets.newHashSet("_all"); } } else { - nodeIds = Strings.splitStringByCommaToArray(request.param("nodeId", "_all")); + nodeIds = Strings.tokenizeToStringArray(request.param("nodeId", "_all"), ","); metrics = Strings.tokenizeByCommaToSet(request.param("metrics", "_all")); } @@ -108,10 +116,7 @@ public class RestNodesInfoAction extends BaseRestHandler { nodesInfoRequest.ingest(metrics.contains("ingest")); nodesInfoRequest.indices(metrics.contains("indices")); } - - settingsFilter.addFilterSettingParams(request); - - return channel -> client.admin().cluster().nodesInfo(nodesInfoRequest, new NodesResponseRestListener<>(channel)); + return nodesInfoRequest; } @Override diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoActionTests.java new file mode 100644 index 00000000000..d757ee095cd --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoActionTests.java @@ -0,0 +1,143 @@ +/* + * Licensed to Elasticsearch under one 
or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.rest.action.admin.cluster.RestNodesInfoAction.ALLOWED_METRICS; + +public class RestNodesInfoActionTests extends ESTestCase { + + public void testDuplicatedFiltersAreNotRemoved() { + Map params = new HashMap<>(); + params.put("nodeId", "_all,master:false,_all"); + + RestRequest restRequest = buildRestRequest(params); + NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); + assertArrayEquals(new String[] { "_all", "master:false", "_all" }, actual.nodesIds()); + } + + public void testOnlyMetrics() { + Map params = new HashMap<>(); + int metricsCount = randomIntBetween(1, ALLOWED_METRICS.size()); + List metrics = new ArrayList<>(); + + for(int i = 0; i < metricsCount; i++) { + metrics.add(randomFrom(ALLOWED_METRICS)); + } + params.put("nodeId", String.join(",", metrics)); + + RestRequest restRequest = buildRestRequest(params); + NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); + assertArrayEquals(new String[] { "_all" }, actual.nodesIds()); + assertMetrics(metrics, actual); + } + + public void testAllMetricsSelectedWhenNodeAndMetricSpecified() { + Map params = new HashMap<>(); + String nodeId = randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(23)); + String metric = randomFrom(ALLOWED_METRICS); + + params.put("nodeId", nodeId + "," + metric); + RestRequest restRequest = buildRestRequest(params); + + NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); + assertArrayEquals(new String[] { nodeId, metric }, actual.nodesIds()); + assertAllMetricsTrue(actual); + } + + public void testSeparateNodeIdsAndMetrics() { + Map params = new HashMap<>(); + List nodeIds = new ArrayList<>(5); + List metrics = new ArrayList<>(5); + + for(int i = 0; i < 5; i++) { + nodeIds.add(randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(23))); + metrics.add(randomFrom(ALLOWED_METRICS)); + } + + params.put("nodeId", String.join(",", nodeIds)); + params.put("metrics", String.join(",", metrics)); + RestRequest restRequest = buildRestRequest(params); + + NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); + assertArrayEquals(nodeIds.toArray(), actual.nodesIds()); + assertMetrics(metrics, actual); + } + + public void testExplicitAllMetrics() { + Map params = new HashMap<>(); + List nodeIds = new ArrayList<>(5); + + for(int i = 0; i 
< 5; i++) { + nodeIds.add(randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(23))); + } + + params.put("nodeId", String.join(",", nodeIds)); + params.put("metrics", "_all"); + RestRequest restRequest = buildRestRequest(params); + + NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); + assertArrayEquals(nodeIds.toArray(), actual.nodesIds()); + assertAllMetricsTrue(actual); + } + + private FakeRestRequest buildRestRequest(Map params) { + return new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("/_nodes") + .withParams(params) + .build(); + } + + private void assertMetrics(List metrics, NodesInfoRequest nodesInfoRequest) { + assertTrue((metrics.contains("http") && nodesInfoRequest.http()) || metrics.contains("http") == false); + assertTrue((metrics.contains("ingest") && nodesInfoRequest.ingest()) || metrics.contains("ingest") == false); + assertTrue((metrics.contains("indices") && nodesInfoRequest.indices()) || metrics.contains("indices") == false); + assertTrue((metrics.contains("jvm") && nodesInfoRequest.jvm()) || metrics.contains("jvm") == false); + assertTrue((metrics.contains("os") && nodesInfoRequest.os()) || metrics.contains("os") == false); + assertTrue((metrics.contains("plugins") && nodesInfoRequest.plugins()) || metrics.contains("plugins") == false); + assertTrue((metrics.contains("process") && nodesInfoRequest.process()) || metrics.contains("process") == false); + assertTrue((metrics.contains("settings") && nodesInfoRequest.settings()) || metrics.contains("settings") == false); + assertTrue((metrics.contains("thread_pool") && nodesInfoRequest.threadPool()) || metrics.contains("thread_pool") == false); + assertTrue((metrics.contains("transport") && nodesInfoRequest.transport()) || metrics.contains("transport") == false); + } + + private void assertAllMetricsTrue(NodesInfoRequest nodesInfoRequest) { + assertTrue(nodesInfoRequest.http()); + assertTrue(nodesInfoRequest.ingest()); + assertTrue(nodesInfoRequest.indices()); + assertTrue(nodesInfoRequest.jvm()); + assertTrue(nodesInfoRequest.os()); + assertTrue(nodesInfoRequest.plugins()); + assertTrue(nodesInfoRequest.process()); + assertTrue(nodesInfoRequest.settings()); + assertTrue(nodesInfoRequest.threadPool()); + assertTrue(nodesInfoRequest.transport()); + } +} From 90dce0864a169d24f6e61ac39df225a261c83a2d Mon Sep 17 00:00:00 2001 From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com> Date: Sun, 12 May 2019 10:32:02 +1000 Subject: [PATCH 40/67] Increase the sample space for random inner hits name generator (#42057) (#42072) This commit changes the minimum length for the inner hits name to avoid name collisions that sometimes failed the test.
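For intuition, a back-of-the-envelope estimate (assuming the generator draws uniformly from 52 upper- and lower-case ASCII letters and picks each length with equal probability): with `randomAlphaOfLengthBetween(1, 16)`, roughly 1 in 16 generated names has length 1, for which only 52 distinct values exist, so two independently generated names collide far more often than the nominal name space suggests. Raising the minimum length to 5 makes the smallest possible name space 52^5 ≈ 3.8 * 10^8, so accidental collisions become negligible.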
--- .../org/elasticsearch/index/query/InnerHitBuilderTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java index 54d478a7f6a..db32c251fd3 100644 --- a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java @@ -146,7 +146,7 @@ public class InnerHitBuilderTests extends ESTestCase { } public static InnerHitBuilder randomInnerHits() { InnerHitBuilder innerHits = new InnerHitBuilder(); - innerHits.setName(randomAlphaOfLengthBetween(1, 16)); + innerHits.setName(randomAlphaOfLengthBetween(5, 16)); innerHits.setFrom(randomIntBetween(0, 32)); innerHits.setSize(randomIntBetween(0, 32)); innerHits.setExplain(randomBoolean()); From 58f2e916848ecc4bec5b4f9e12fb30ca55507861 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 13 May 2019 08:42:26 -0400 Subject: [PATCH 41/67] [DOCS] Rewrite 'rewrite' parameter docs (#42018) --- .../modules/indices/search-settings.asciidoc | 1 + .../query-dsl/multi-term-rewrite.asciidoc | 148 +++++++++++++----- 2 files changed, 107 insertions(+), 42 deletions(-) diff --git a/docs/reference/modules/indices/search-settings.asciidoc b/docs/reference/modules/indices/search-settings.asciidoc index ad75de1291c..30137fa3827 100644 --- a/docs/reference/modules/indices/search-settings.asciidoc +++ b/docs/reference/modules/indices/search-settings.asciidoc @@ -3,6 +3,7 @@ The following _expert_ setting can be set to manage global search limits. +[[indices-query-bool-max-clause-count]] `indices.query.bool.max_clause_count`:: Defaults to `1024`. diff --git a/docs/reference/query-dsl/multi-term-rewrite.asciidoc b/docs/reference/query-dsl/multi-term-rewrite.asciidoc index 0d327a40fde..391b42ea007 100644 --- a/docs/reference/query-dsl/multi-term-rewrite.asciidoc +++ b/docs/reference/query-dsl/multi-term-rewrite.asciidoc @@ -1,45 +1,109 @@ [[query-dsl-multi-term-rewrite]] -== Multi Term Query Rewrite +== `rewrite` Parameter -Multi term queries, like -<> and -<> are called -multi term queries and end up going through a process of rewrite. This -also happens on the -<>. -All of those queries allow to control how they will get rewritten using -the `rewrite` parameter: +WARNING: This parameter is for expert users only. Changing the value of +this parameter can impact search performance and relevance. -* `constant_score` (default): A rewrite method that performs like -`constant_score_boolean` when there are few matching terms and otherwise -visits all matching terms in sequence and marks documents for that term. -Matching documents are assigned a constant score equal to the query's -boost. -* `scoring_boolean`: A rewrite method that first translates each term -into a should clause in a boolean query, and keeps the scores as -computed by the query. Note that typically such scores are meaningless -to the user, and require non-trivial CPU to compute, so it's almost -always better to use `constant_score`. This rewrite method will hit -too many clauses failure if it exceeds the boolean query limit (defaults -to `1024`). -* `constant_score_boolean`: Similar to `scoring_boolean` except scores -are not computed. Instead, each matching document receives a constant -score equal to the query's boost. This rewrite method will hit too many -clauses failure if it exceeds the boolean query limit (defaults to -`1024`). 
-* `top_terms_N`: A rewrite method that first translates each term into -should clause in boolean query, and keeps the scores as computed by the -query. This rewrite method only uses the top scoring terms so it will -not overflow boolean max clause count. The `N` controls the size of the -top scoring terms to use. -* `top_terms_boost_N`: A rewrite method that first translates each term -into should clause in boolean query, but the scores are only computed as -the boost. This rewrite method only uses the top scoring terms so it -will not overflow the boolean max clause count. The `N` controls the -size of the top scoring terms to use. -* `top_terms_blended_freqs_N`: A rewrite method that first translates each -term into should clause in boolean query, but all term queries compute scores -as if they had the same frequency. In practice the frequency which is used -is the maximum frequency of all matching terms. This rewrite method only uses -the top scoring terms so it will not overflow boolean max clause count. The -`N` controls the size of the top scoring terms to use. +{es} uses https://lucene.apache.org/core/[Apache Lucene] internally to power +indexing and searching. In their original form, Lucene cannot execute the +following queries: + +* <> +* <> +* <> +* <> +* <> + +To execute them, Lucene changes these queries to a simpler form, such as a +<> or a +https://en.wikipedia.org/wiki/Bit_array[bit set]. + +The `rewrite` parameter determines: + +* How Lucene calculates the relevance scores for each matching document +* Whether Lucene changes the original query to a `bool` +query or bit set +* If changed to a `bool` query, which `term` query clauses are included + +[float] +[[rewrite-param-valid-values]] +=== Valid values + +`constant_score` (Default):: +Uses the `constant_score_boolean` method for fewer matching terms. Otherwise, +this method finds all matching terms in sequence and returns matching documents +using a bit set. + +`constant_score_boolean`:: +Assigns each document a relevance score equal to the `boost` +parameter. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +This method can cause the final `bool` query to exceed the clause limit in the +<> +setting. If the query exceeds this limit, {es} returns an error. + +`scoring_boolean`:: +Calculates a relevance score for each matching document. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +This method can cause the final `bool` query to exceed the clause limit in the +<> +setting. If the query exceeds this limit, {es} returns an error. + +`top_terms_blended_freqs_N`:: +Calculates a relevance score for each matching document as if all terms had the +same frequency. This frequency is the maximum frequency of all matching terms. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +The final `bool` query only includes `term` queries for the top `N` scoring +terms. ++ +You can use this method to avoid exceeding the clause limit in the +<> +setting. + +`top_terms_boost_N`:: +Assigns each matching document a relevance score equal to the `boost` parameter. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +The final `bool` query only includes `term` queries for the top `N` terms. 
++ +You can use this method to avoid exceeding the clause limit in the +<> +setting. + +`top_terms_N`:: +Calculates a relevance score for each matching document. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +The final `bool` query +only includes `term` queries for the top `N` scoring terms. ++ +You can use this method to avoid exceeding the clause limit in the +<> +setting. + +[float] +[[rewrite-param-perf-considerations]] +=== Performance considerations for the `rewrite` parameter +For most uses, we recommend using the `constant_score`, +`constant_score_boolean`, or `top_terms_boost_N` rewrite methods. + +Other methods calculate relevance scores. These score calculations are often +expensive and do not improve query results. \ No newline at end of file From 367e0279623b250f70dd84fbcb69ab184998fae5 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 14 May 2019 10:34:20 +0100 Subject: [PATCH 42/67] Log cluster UUID when committed (#42065) Today we do not expose the cluster UUID in any logs by default, but it would be useful to see it. For instance if a user starts multiple nodes as separate clusters then they will silently remain as separate clusters even if they are subsequently reconfigured to look like a single cluster. This change logs the committed cluster UUID the first time the node encounters it. --- .../cluster/coordination/CoordinationState.java | 1 + .../org/elasticsearch/cluster/coordination/Coordinator.java | 6 +++++- server/src/main/java/org/elasticsearch/node/Node.java | 6 ++++-- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index dff6b5add0b..11c88d06425 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -494,6 +494,7 @@ public class CoordinationState { metaDataBuilder = MetaData.builder(lastAcceptedState.metaData()); } metaDataBuilder.clusterUUIDCommitted(true); + logger.info("cluster UUID set to [{}]", lastAcceptedState.metaData().clusterUUID()); } if (metaDataBuilder != null) { setLastAcceptedState(ClusterState.builder(lastAcceptedState).metaData(metaDataBuilder).build()); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 50361d12eb5..a9857611625 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -677,7 +677,11 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery coordinationState.set(new CoordinationState(settings, getLocalNode(), persistedState)); peerFinder.setCurrentTerm(getCurrentTerm()); configuredHostsResolver.start(); - VotingConfiguration votingConfiguration = coordinationState.get().getLastAcceptedState().getLastCommittedConfiguration(); + final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState(); + if (lastAcceptedState.metaData().clusterUUIDCommitted()) { + logger.info("cluster UUID [{}]", lastAcceptedState.metaData().clusterUUID()); + } + final VotingConfiguration votingConfiguration = lastAcceptedState.getLastCommittedConfiguration(); if 
(singleNodeDiscovery && votingConfiguration.isEmpty() == false && votingConfiguration.hasQuorum(Collections.singleton(getLocalNode().getId())) == false) { diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 699d032e35e..3240d3f7292 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -40,6 +40,7 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.InternalClusterInfoService; @@ -269,8 +270,9 @@ public class Node implements Closeable { nodeEnvironment = new NodeEnvironment(tmpSettings, environment); resourcesToClose.add(nodeEnvironment); - logger.info("node name [{}], node ID [{}]", - NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId()); + logger.info("node name [{}], node ID [{}], cluster name [{}]", + NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId(), + ClusterName.CLUSTER_NAME_SETTING.get(tmpSettings).value()); final JvmInfo jvmInfo = JvmInfo.jvmInfo(); logger.info( From 327f44e05108558f74276748baa2a8d5a917db96 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Tue, 14 May 2019 16:29:18 -0400 Subject: [PATCH 43/67] Concurrent tests wait for threads to be ready (#42083) This change updates tests that use a CountDownLatch to synchronize the running of threads when testing concurrent operations so that we ensure the thread has been fully created and run by the scheduler. Previously, these tests used a latch with a value of 1 and the test thread counted down while the threads performing concurrent operations just waited. This change updates the value of the latch to be 1 + the number of threads. Each thread counts down and then waits. This means that each thread has been constructed and has started running. All threads will have a common start point now. 
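A self-contained sketch of the pattern described above (the thread count and class name are illustrative):

```java
import java.util.concurrent.CountDownLatch;

public class CommonStartDemo {
    public static void main(String[] args) {
        final int numThreads = 4;
        // One latch slot per worker thread plus one for the test (main) thread.
        final CountDownLatch latch = new CountDownLatch(1 + numThreads);
        for (int i = 0; i < numThreads; i++) {
            new Thread(() -> {
                latch.countDown(); // prove this worker was constructed and is running
                try {
                    latch.await(); // block until all workers and the test thread arrive
                } catch (InterruptedException e) {
                    throw new AssertionError(e);
                }
                // ... the concurrent operation under test now starts from a common point ...
            }).start();
        }
        latch.countDown(); // the test thread arrives; every thread is released together
    }
}
```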
--- .../elasticsearch/common/util/concurrent/CountDownTests.java | 3 ++- .../elasticsearch/common/util/concurrent/KeyedLockTests.java | 3 ++- .../org/elasticsearch/common/util/concurrent/RunOnceTests.java | 3 ++- .../org/elasticsearch/node/ResponseCollectorServiceTests.java | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java index 1a32064fe7d..46021344fb7 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java @@ -34,12 +34,13 @@ public class CountDownTests extends ESTestCase { final AtomicInteger count = new AtomicInteger(0); final CountDown countDown = new CountDown(scaledRandomIntBetween(10, 1000)); Thread[] threads = new Thread[between(3, 10)]; - final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch latch = new CountDownLatch(1 + threads.length); for (int i = 0; i < threads.length; i++) { threads[i] = new Thread() { @Override public void run() { + latch.countDown(); try { latch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java index e50e205ff13..2160052619c 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java @@ -45,8 +45,8 @@ public class KeyedLockTests extends ESTestCase { for (int i = 0; i < names.length; i++) { names[i] = randomRealisticUnicodeOfLengthBetween(10, 20); } - CountDownLatch startLatch = new CountDownLatch(1); int numThreads = randomIntBetween(3, 10); + final CountDownLatch startLatch = new CountDownLatch(1 + numThreads); AcquireAndReleaseThread[] threads = new AcquireAndReleaseThread[numThreads]; for (int i = 0; i < numThreads; i++) { threads[i] = new AcquireAndReleaseThread(startLatch, connectionLock, names, counter, safeCounter); @@ -157,6 +157,7 @@ public class KeyedLockTests extends ESTestCase { @Override public void run() { + startLatch.countDown(); try { startLatch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java index e833edc9d56..a41d37be215 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java @@ -45,9 +45,10 @@ public class RunOnceTests extends ESTestCase { final RunOnce runOnce = new RunOnce(counter::incrementAndGet); final Thread[] threads = new Thread[between(3, 10)]; - final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch latch = new CountDownLatch(1 + threads.length); for (int i = 0; i < threads.length; i++) { threads[i] = new Thread(() -> { + latch.countDown(); try { latch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java b/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java index 5fedfa7869e..7ac254f9948 100644 --- a/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java @@ -77,9 +77,10 @@ public class ResponseCollectorServiceTests extends ESTestCase { public void testConcurrentAddingAndRemoving() throws Exception { String[] nodes = new String[] {"a", "b", "c", "d"}; - final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch latch = new CountDownLatch(5); Runnable f = () -> { + latch.countDown(); try { latch.await(); } catch (InterruptedException e) { From 70ea3cf8474bae838fbad207ab5c9583b644c678 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Tue, 14 May 2019 18:57:12 -0500 Subject: [PATCH 44/67] SQL: Add initial geo support (#42031) (#42135) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds an initial, limited implementation of geo features to SQL. This implementation is based on the [OpenGIS® Implementation Standard for Geographic information - Simple feature access](http://www.opengeospatial.org/standards/sfs), which is the current standard for GIS system implementation. This effort concentrates on the SQL option, AKA ISO 19125-2. Queries that are supported as a result of this initial implementation: Metadata commands - `DESCRIBE table` - returns the correct column types `GEOMETRY` for geo shapes and geo points. - `SHOW FUNCTIONS` - returns a list that includes the supported `ST_` functions. - `SYS TYPES` and `SYS COLUMNS` display the correct types `GEO_SHAPE` and `GEO_POINT` for geo shapes and geo points, respectively. Returning geoshapes and geopoints from elasticsearch - `SELECT geom FROM table` - returns the geoshapes and geo_points as libs/geo objects in JDBC or as WKT strings in console. - `SELECT ST_AsWKT(geom) FROM table;` and `SELECT ST_AsText(geom) FROM table;` - return the geoshapes and geopoints in their WKT representation; Using geopoints in elasticsearch - The following functions will be supported for geopoints in queries, sorting and aggregations: `ST_GeomFromText`, `ST_X`, `ST_Y`, `ST_Z`, `ST_GeometryType`, and `ST_Distance`. In most cases when used in queries, sorting and aggregations, these functions are translated into scripts. These functions can be used in the SELECT clause for both geopoints and geoshapes. - `SELECT * FROM table WHERE ST_Distance(ST_GeomFromText('POINT(1 2)'), point) < 10;` - returns all records for which `point` is located within 10m from the `POINT(1 2)`. In this case the WHERE clause is translated into a range query. Limitations: Geoshapes cannot be used in queries, sorting and aggregations as part of this initial effort. In order to fully take advantage of geoshapes we would need to have access to geoshape doc values, which is coming in #37206. `ST_Z` cannot be used on geopoints in queries, sorting and aggregations since we don't store altitude in geo_point doc values.
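A hypothetical end-to-end JDBC usage of the functions above (the connection URL follows the ES JDBC `jdbc:es://` scheme; the index and column names are made up for illustration):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class GeoSqlDemo {
    public static void main(String[] args) throws Exception {
        try (Connection con = DriverManager.getConnection("jdbc:es://localhost:9200");
             Statement st = con.createStatement();
             // The distance filter is translated into a range query server-side;
             // ST_AsWKT returns the WKT form of the geometry, e.g. POINT (1.0 2.0).
             ResultSet rs = st.executeQuery(
                 "SELECT city, ST_AsWKT(location) FROM geo "
                     + "WHERE ST_Distance(location, ST_GeomFromText('POINT(1 2)')) < 10")) {
            while (rs.next()) {
                System.out.println(rs.getString(1) + " -> " + rs.getString(2));
            }
        }
    }
}
```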
Relates to #29872 Backport of #42031 --- docs/reference/sql/functions/geo.asciidoc | 192 +++++ docs/reference/sql/functions/index.asciidoc | 9 + .../sql/language/data-types.asciidoc | 2 + docs/reference/sql/limitations.asciidoc | 11 + .../common/geo/parsers/ShapeParser.java | 22 + x-pack/plugin/sql/build.gradle | 1 + x-pack/plugin/sql/jdbc/build.gradle | 3 + .../elasticsearch/xpack/sql/jdbc/EsType.java | 4 +- .../xpack/sql/jdbc/ExtraTypes.java | 1 + .../xpack/sql/jdbc/JdbcColumnInfo.java | 3 +- .../xpack/sql/jdbc/JdbcConfiguration.java | 4 +- .../xpack/sql/jdbc/JdbcPreparedStatement.java | 19 +- .../xpack/sql/jdbc/TypeConverter.java | 11 + .../xpack/sql/jdbc/TypeUtils.java | 2 + x-pack/plugin/sql/qa/build.gradle | 17 +- .../sql/qa/multi_node/GeoJdbcCsvSpecIT.java | 16 + .../sql/qa/multi_node/GeoJdbcSqlSpecIT.java | 15 + .../sql/qa/single_node/GeoJdbcCsvSpecIT.java | 31 + .../sql/qa/single_node/GeoJdbcSqlSpecIT.java | 15 + .../xpack/sql/qa/geo/GeoCsvSpecTestCase.java | 79 ++ .../xpack/sql/qa/geo/GeoDataLoader.java | 158 ++++ .../xpack/sql/qa/geo/GeoSqlSpecTestCase.java | 94 +++ .../xpack/sql/qa/jdbc/CsvTestUtils.java | 16 +- .../xpack/sql/qa/jdbc/DataLoader.java | 4 +- .../xpack/sql/qa/jdbc/JdbcAssert.java | 33 + .../xpack/sql/qa/jdbc/LocalH2.java | 2 +- .../qa/src/main/resources/command.csv-spec | 11 +- .../qa/src/main/resources/docs/docs.csv-spec | 14 +- .../qa/src/main/resources/docs/geo.csv-spec | 79 ++ .../sql/qa/src/main/resources/geo/geo.csv | 16 + .../src/main/resources/geo/geosql-bulk.json | 33 + .../qa/src/main/resources/geo/geosql.csv-spec | 288 ++++++++ .../sql/qa/src/main/resources/geo/geosql.json | 28 + .../qa/src/main/resources/geo/geosql.sql-spec | 24 + .../src/main/resources/geo/setup_test_geo.sql | 9 + .../qa/src/main/resources/ogc/OGC-NOTICE.txt | 41 ++ .../qa/src/main/resources/ogc/ogc.csv-spec | 36 + .../sql/qa/src/main/resources/ogc/ogc.json | 58 ++ .../qa/src/main/resources/ogc/ogc.sql-spec | 85 +++ .../sql/qa/src/main/resources/ogc/sqltsch.sql | 672 ++++++++++++++++++ .../single-node-only/command-sys-geo.csv-spec | 15 + .../xpack/sql/analysis/analyzer/Verifier.java | 32 +- .../search/extractor/FieldHitExtractor.java | 45 +- .../xpack/sql/expression/TypeResolutions.java | 5 + .../expression/function/FunctionRegistry.java | 19 + .../function/scalar/Processors.java | 7 + .../function/scalar/geo/GeoProcessor.java | 97 +++ .../function/scalar/geo/GeoShape.java | 222 ++++++ .../function/scalar/geo/StAswkt.java | 45 ++ .../function/scalar/geo/StDistance.java | 74 ++ .../scalar/geo/StDistanceFunction.java | 27 + .../function/scalar/geo/StDistancePipe.java | 56 ++ .../scalar/geo/StDistanceProcessor.java | 87 +++ .../function/scalar/geo/StGeometryType.java | 45 ++ .../function/scalar/geo/StWkttosql.java | 67 ++ .../scalar/geo/StWkttosqlProcessor.java | 76 ++ .../expression/function/scalar/geo/StX.java | 45 ++ .../expression/function/scalar/geo/StY.java | 45 ++ .../expression/function/scalar/geo/StZ.java | 45 ++ .../function/scalar/geo/UnaryGeoFunction.java | 84 +++ .../whitelist/InternalSqlScriptUtils.java | 75 +- .../expression/gen/script/ScriptWeaver.java | 8 + .../sql/expression/literal/Intervals.java | 1 - .../sql/expression/literal/Literals.java | 2 + .../xpack/sql/planner/QueryTranslator.java | 26 + .../sql/querydsl/query/GeoDistanceQuery.java | 77 ++ .../xpack/sql/type/DataType.java | 7 + .../xpack/sql/type/DataTypes.java | 4 + .../xpack/sql/type/ExtTypes.java | 3 +- .../xpack/sql/plugin/sql_whitelist.txt | 23 +- .../analyzer/FieldAttributeTests.java | 2 +- 
.../analyzer/VerifierErrorMessagesTests.java | 24 + .../extractor/FieldHitExtractorTests.java | 134 ++++ .../scalar/geo/GeoProcessorTests.java | 106 +++ .../scalar/geo/StDistanceProcessorTests.java | 66 ++ .../scalar/geo/StWkttosqlProcessorTests.java | 42 ++ .../xpack/sql/optimizer/OptimizerTests.java | 10 + .../logical/command/sys/SysColumnsTests.java | 16 +- .../logical/command/sys/SysTypesTests.java | 2 +- .../sql/planner/QueryTranslatorTests.java | 107 ++- .../xpack/sql/type/TypesTests.java | 7 +- .../sql/src/test/resources/mapping-geo.json | 3 + .../mapping-multi-field-variation.json | 4 +- .../mapping-multi-field-with-nested.json | 1 + 84 files changed, 3976 insertions(+), 70 deletions(-) create mode 100644 docs/reference/sql/functions/geo.asciidoc create mode 100644 x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcCsvSpecIT.java create mode 100644 x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcSqlSpecIT.java create mode 100644 x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcCsvSpecIT.java create mode 100644 x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java create mode 100644 x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java create mode 100644 x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java create mode 100644 x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java create mode 100644 x-pack/plugin/sql/qa/src/main/resources/docs/geo.csv-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/geosql-bulk.json create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql create mode 100644 x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt create mode 100644 x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json create mode 100644 x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql create mode 100644 x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java create mode 
100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java
create mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java
create mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java
create mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java

diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc
new file mode 100644
index 00000000000..f5ed716eaeb
--- /dev/null
+++ b/docs/reference/sql/functions/geo.asciidoc
@@ -0,0 +1,192 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-functions-geo]]
+=== Geo Functions
+
+The geo functions work with geometries stored in `geo_point` and `geo_shape` fields, or returned by other geo functions.
+
+==== Limitations
+
+Both <> and <> types are represented in SQL as geometry and can be used
+interchangeably with the following exceptions:
+
+* `geo_shape` fields don't have doc values, therefore these fields cannot be used for filtering, grouping or sorting.
+
+* `geo_point` fields are indexed and have doc values by default, however only latitude and longitude are stored and
+  indexed with some loss of precision from the original values (4.190951585769653E-8 for the latitude and
+  8.381903171539307E-8 for longitude). The altitude component is accepted but neither stored in doc values nor indexed.
+  Therefore calling the `ST_Z` function in filtering, grouping or sorting will return `null`.
+
+==== Geometry Conversion
+
+[[sql-functions-geo-st-as-wkt]]
+===== `ST_AsWKT`
+
+.Synopsis:
+[source, sql]
+--------------------------------------------------
+ST_AsWKT(geometry<1>)
+--------------------------------------------------
+
+*Input*:
+
+<1> geometry
+
+*Output*: string
+
+.Description:
+
+Returns the WKT representation of the `geometry`.
+
+["source","sql",subs="attributes,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt]
+--------------------------------------------------
+
+
+[[sql-functions-geo-st-wkt-to-sql]]
+===== `ST_WKTToSQL`
+
+.Synopsis:
+[source, sql]
+--------------------------------------------------
+ST_WKTToSQL(string<1>)
+--------------------------------------------------
+
+*Input*:
+
+<1> string WKT representation of geometry
+
+*Output*: geometry
+
+.Description:
+
+Returns the geometry from its WKT representation.
+ +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt] +-------------------------------------------------- + +==== Geometry Properties + +[[sql-functions-geo-st-geometrytype]] +===== `ST_GeometryType` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_GeometryType(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: string + +.Description: + +Returns the type of the `geometry` such as POINT, MULTIPOINT, LINESTRING, MULTILINESTRING, POLYGON, MULTIPOLYGON, GEOMETRYCOLLECTION, ENVELOPE or CIRCLE. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[geometrytype] +-------------------------------------------------- + +[[sql-functions-geo-st-x]] +===== `ST_X` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_X(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: double + +.Description: + +Returns the longitude of the first point in the geometry. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[x] +-------------------------------------------------- + +[[sql-functions-geo-st-y]] +===== `ST_Y` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_Y(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: double + +.Description: + +Returns the the latitude of the first point in the geometry. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[y] +-------------------------------------------------- + +[[sql-functions-geo-st-z]] +===== `ST_Z` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_Z(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: double + +.Description: + +Returns the altitude of the first point in the geometry. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[z] +-------------------------------------------------- + +[[sql-functions-geo-st-distance]] +===== `ST_Distance` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_Distance(geometry<1>, geometry<2>) +-------------------------------------------------- + +*Input*: + +<1> source geometry +<2> target geometry + +*Output*: Double + +.Description: + +Returns the distance between geometries in meters. Both geometries have to be points. 
+ +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[distance] +-------------------------------------------------- \ No newline at end of file diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc index 382adeecea4..248c47452ba 100644 --- a/docs/reference/sql/functions/index.asciidoc +++ b/docs/reference/sql/functions/index.asciidoc @@ -136,6 +136,14 @@ ** <> ** <> ** <> +* <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> * <> ** <> ** <> @@ -149,5 +157,6 @@ include::search.asciidoc[] include::math.asciidoc[] include::string.asciidoc[] include::type-conversion.asciidoc[] +include::geo.asciidoc[] include::conditional.asciidoc[] include::system.asciidoc[] diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc index 8db4c88f3a1..ad9b2a320c0 100644 --- a/docs/reference/sql/language/data-types.asciidoc +++ b/docs/reference/sql/language/data-types.asciidoc @@ -81,6 +81,8 @@ s|SQL precision | interval_hour_to_minute | 23 | interval_hour_to_second | 23 | interval_minute_to_second | 23 +| geo_point | 52 +| geo_shape | 2,147,483,647 |=== diff --git a/docs/reference/sql/limitations.asciidoc b/docs/reference/sql/limitations.asciidoc index b9c59e31b3d..c5b334480c9 100644 --- a/docs/reference/sql/limitations.asciidoc +++ b/docs/reference/sql/limitations.asciidoc @@ -150,3 +150,14 @@ SELECT count(*) FROM test GROUP BY MINUTE((CAST(date_created AS TIME)); ------------------------------------------------------------- SELECT HISTOGRAM(CAST(birth_date AS TIME), INTERVAL '10' MINUTES) as h, COUNT(*) FROM t GROUP BY h ------------------------------------------------------------- + +[float] +[[geo-sql-limitations]] +=== Geo-related functions + +Since `geo_shape` fields don't have doc values these fields cannot be used for filtering, grouping or sorting. + +By default,`geo_points` fields are indexed and have doc values. However only latitude and longitude are stored and +indexed with some loss of precision from the original values (4.190951585769653E-8 for the latitude and +8.381903171539307E-8 for longitude). The altitude component is accepted but not stored in doc values nor indexed. +Therefore calling `ST_Z` function in the filtering, grouping or sorting will return `null`. 
diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java index 21d1bd9f255..9299edc459c 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java @@ -20,12 +20,18 @@ package org.elasticsearch.common.geo.parsers; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.builders.ShapeBuilder; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper; import java.io.IOException; +import java.io.InputStream; /** * first point of entry for a shape parser @@ -67,4 +73,20 @@ public interface ShapeParser { static ShapeBuilder parse(XContentParser parser) throws IOException { return parse(parser, null); } + + static ShapeBuilder parse(Object value) throws IOException { + XContentBuilder content = JsonXContent.contentBuilder(); + content.startObject(); + content.field("value", value); + content.endObject(); + + try (InputStream stream = BytesReference.bytes(content).streamInput(); + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + parser.nextToken(); // start object + parser.nextToken(); // field name + parser.nextToken(); // field value + return parse(parser); + } + } } diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index c4719aef04a..b996f069b4c 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -16,6 +16,7 @@ ext { // SQL test dependency versions csvjdbcVersion="1.0.34" h2Version="1.4.197" + h2gisVersion="1.5.0" } configurations { diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index 3c7eb6b804b..22186976d6f 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -21,6 +21,9 @@ dependencies { compile (project(':libs:x-content')) { transitive = false } + compile (project(':libs:elasticsearch-geo')) { + transitive = false + } compile project(':libs:core') runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "org.elasticsearch.test:framework:${version}" diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java index 52aff352ac1..51a03dad70b 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java @@ -44,7 +44,9 @@ public enum EsType implements SQLType { INTERVAL_DAY_TO_SECOND( ExtraTypes.INTERVAL_DAY_SECOND), INTERVAL_HOUR_TO_MINUTE( ExtraTypes.INTERVAL_HOUR_MINUTE), INTERVAL_HOUR_TO_SECOND( ExtraTypes.INTERVAL_HOUR_SECOND), - INTERVAL_MINUTE_TO_SECOND(ExtraTypes.INTERVAL_MINUTE_SECOND); + INTERVAL_MINUTE_TO_SECOND(ExtraTypes.INTERVAL_MINUTE_SECOND), + GEO_POINT( ExtraTypes.GEOMETRY), + 
GEO_SHAPE( ExtraTypes.GEOMETRY); private final Integer type; diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java index 3df70f8e1d9..b8f09ece2f3 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java @@ -29,5 +29,6 @@ class ExtraTypes { static final int INTERVAL_HOUR_MINUTE = 111; static final int INTERVAL_HOUR_SECOND = 112; static final int INTERVAL_MINUTE_SECOND = 113; + static final int GEOMETRY = 114; } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java index 9b1ff875967..5f2f0773ff1 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.sql.jdbc; import java.util.Objects; @@ -89,4 +90,4 @@ class JdbcColumnInfo { public int hashCode() { return Objects.hash(name, type, table, catalog, schema, label, displaySize); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java index 7a9154c10ac..370dc26e0d2 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java @@ -35,7 +35,7 @@ import static org.elasticsearch.xpack.sql.client.UriUtils.removeQuery; / Additional properties can be specified either through the Properties object or in the URL. In case of duplicates, the URL wins. */ //TODO: beef this up for Security/SSL -class JdbcConfiguration extends ConnectionConfiguration { +public class JdbcConfiguration extends ConnectionConfiguration { static final String URL_PREFIX = "jdbc:es://"; public static URI DEFAULT_URI = URI.create("http://localhost:9200/"); @@ -47,7 +47,7 @@ class JdbcConfiguration extends ConnectionConfiguration { // can be out/err/url static final String DEBUG_OUTPUT_DEFAULT = "err"; - static final String TIME_ZONE = "timezone"; + public static final String TIME_ZONE = "timezone"; // follow the JDBC spec and use the JVM default... 
// to avoid inconsistency, the default is picked up once at startup and reused across connections // to cater to the principle of least surprise diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java index 041c457d91b..39d942362d7 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java @@ -190,7 +190,7 @@ class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { setParam(parameterIndex, null, EsType.NULL); return; } - + // check also here the unsupported types so that any unsupported interfaces ({@code java.sql.Struct}, // {@code java.sql.Array} etc) will generate the correct exception message. Otherwise, the method call // {@code TypeConverter.fromJavaToJDBC(x.getClass())} will report the implementing class as not being supported. @@ -330,7 +330,7 @@ class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { setObject(parameterIndex, xmlObject); } - + @Override public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { setObject(parameterIndex, x, TypeUtils.asSqlType(targetSqlType), scaleOrLength); @@ -343,13 +343,12 @@ class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { private void setObject(int parameterIndex, Object x, EsType dataType, String typeString) throws SQLException { checkOpen(); - // set the null value on the type and exit if (x == null) { setParam(parameterIndex, null, dataType); return; } - + checkKnownUnsupportedTypes(x); if (x instanceof byte[]) { if (dataType != EsType.BINARY) { @@ -359,7 +358,7 @@ class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { setParam(parameterIndex, x, EsType.BINARY); return; } - + if (x instanceof Timestamp || x instanceof Calendar || x instanceof Date @@ -380,7 +379,7 @@ class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { LocalDateTime ldt = (LocalDateTime) x; Calendar cal = getDefaultCalendar(); cal.set(ldt.getYear(), ldt.getMonthValue() - 1, ldt.getDayOfMonth(), ldt.getHour(), ldt.getMinute(), ldt.getSecond()); - + dateToSet = cal.getTime(); } else if (x instanceof Time) { dateToSet = new java.util.Date(((Time) x).getTime()); @@ -398,7 +397,7 @@ class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { throw new SQLFeatureNotSupportedException( "Conversion from type [" + x.getClass().getName() + "] to [" + typeString + "] not supported"); } - + if (x instanceof Boolean || x instanceof Byte || x instanceof Short @@ -412,7 +411,7 @@ class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { dataType); return; } - + throw new SQLFeatureNotSupportedException( "Conversion from type [" + x.getClass().getName() + "] to [" + typeString + "] not supported"); } @@ -421,14 +420,14 @@ class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { List> unsupportedTypes = new ArrayList<>(Arrays.asList(Struct.class, Array.class, SQLXML.class, RowId.class, Ref.class, Blob.class, NClob.class, Clob.class, LocalDate.class, LocalTime.class, OffsetTime.class, OffsetDateTime.class, URL.class, BigDecimal.class)); - + for (Class 
clazz:unsupportedTypes) { if (clazz.isAssignableFrom(x.getClass())) { throw new SQLFeatureNotSupportedException("Objects of type [" + clazz.getName() + "] are not supported"); } } } - + private Calendar getDefaultCalendar() { return Calendar.getInstance(cfg.timeZone(), Locale.ROOT); } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java index 9c30241ccbd..7e21f2206b1 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java @@ -5,13 +5,16 @@ */ package org.elasticsearch.xpack.sql.jdbc; +import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.proto.StringUtils; +import java.io.IOException; import java.sql.Date; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.sql.Time; import java.sql.Timestamp; +import java.text.ParseException; import java.time.Duration; import java.time.LocalDate; import java.time.LocalDateTime; @@ -100,6 +103,7 @@ final class TypeConverter { } + static long convertFromCalendarToUTC(long value, Calendar cal) { if (cal == null) { return value; @@ -239,6 +243,13 @@ final class TypeConverter { case INTERVAL_HOUR_TO_SECOND: case INTERVAL_MINUTE_TO_SECOND: return Duration.parse(v.toString()); + case GEO_POINT: + case GEO_SHAPE: + try { + return WellKnownText.fromWKT(v.toString()); + } catch (IOException | ParseException ex) { + throw new SQLException("Cannot parse geo_shape", ex); + } case IP: return v.toString(); default: diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeUtils.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeUtils.java index ab8465dab90..0f1df554d3f 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeUtils.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeUtils.java @@ -92,6 +92,8 @@ final class TypeUtils { types.put(EsType.INTERVAL_HOUR_TO_MINUTE, Duration.class); types.put(EsType.INTERVAL_HOUR_TO_SECOND, Duration.class); types.put(EsType.INTERVAL_MINUTE_TO_SECOND, Duration.class); + types.put(EsType.GEO_POINT, String.class); + types.put(EsType.GEO_SHAPE, String.class); TYPE_TO_CLASS = unmodifiableMap(types); diff --git a/x-pack/plugin/sql/qa/build.gradle b/x-pack/plugin/sql/qa/build.gradle index 4c9fa6de030..f2a6acd61a0 100644 --- a/x-pack/plugin/sql/qa/build.gradle +++ b/x-pack/plugin/sql/qa/build.gradle @@ -16,7 +16,12 @@ dependencies { // CLI testing dependencies compile project(path: xpackModule('sql:sql-cli'), configuration: 'nodeps') - + + // H2GIS testing dependencies + compile ("org.orbisgis:h2gis:${h2gisVersion}") { + exclude group: "org.locationtech.jts" + } + // select just the parts of JLine that are needed compile("org.jline:jline-terminal-jna:${jlineVersion}") { exclude group: "net.java.dev.jna" @@ -40,6 +45,9 @@ forbiddenApisMain { replaceSignatureFiles 'es-all-signatures', 'es-test-signatures' } +// just a test fixture: we aren't using this jars in releases and H2GIS requires disabling a lot of checks +thirdPartyAudit.enabled = false + subprojects { apply plugin: 'elasticsearch.standalone-rest-test' dependencies { @@ -56,10 +64,15 @@ subprojects { // JDBC testing dependencies testRuntime "net.sourceforge.csvjdbc:csvjdbc:${csvjdbcVersion}" testRuntime 
"com.h2database:h2:${h2Version}" + + // H2GIS testing dependencies + testRuntime ("org.orbisgis:h2gis:${h2gisVersion}") { + exclude group: "org.locationtech.jts" + } + testRuntime project(path: xpackModule('sql:jdbc'), configuration: 'nodeps') testRuntime xpackProject('plugin:sql:sql-client') - // TODO check if needed testRuntime("org.antlr:antlr4-runtime:${antlrVersion}") { transitive = false diff --git a/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcCsvSpecIT.java new file mode 100644 index 00000000000..3b4ef0f767f --- /dev/null +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcCsvSpecIT.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.qa.multi_node; + +import org.elasticsearch.xpack.sql.qa.geo.GeoCsvSpecTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; + +public class GeoJdbcCsvSpecIT extends GeoCsvSpecTestCase { + public GeoJdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } +} diff --git a/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcSqlSpecIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcSqlSpecIT.java new file mode 100644 index 00000000000..d8a4f0b8961 --- /dev/null +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcSqlSpecIT.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.qa.multi_node; + +import org.elasticsearch.xpack.sql.qa.geo.GeoSqlSpecTestCase; + +public class GeoJdbcSqlSpecIT extends GeoSqlSpecTestCase { + public GeoJdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } +} diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcCsvSpecIT.java new file mode 100644 index 00000000000..8f5352304ed --- /dev/null +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcCsvSpecIT.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.sql.qa.geo.GeoCsvSpecTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; + +public class GeoJdbcCsvSpecIT extends GeoCsvSpecTestCase { + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + List list = new ArrayList<>(); + list.addAll(GeoCsvSpecTestCase.readScriptSpec()); + list.addAll(readScriptSpec("/single-node-only/command-sys-geo.csv-spec", specParser())); + return list; + } + + public GeoJdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } +} diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java new file mode 100644 index 00000000000..2a9a1592c71 --- /dev/null +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.qa.single_node; + +import org.elasticsearch.xpack.sql.qa.geo.GeoSqlSpecTestCase; + +public class GeoJdbcSqlSpecIT extends GeoSqlSpecTestCase { + public GeoJdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java new file mode 100644 index 00000000000..e40e6de9e3a --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.qa.geo; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.client.Request; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.SpecBaseIntegrationTestCase; +import org.elasticsearch.xpack.sql.jdbc.JdbcConfiguration; +import org.junit.Before; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.csvConnection; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.executeCsvQuery; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; + +/** + * Tests comparing sql queries executed against our jdbc client + * with hard coded result sets. 
+ */ +public abstract class GeoCsvSpecTestCase extends SpecBaseIntegrationTestCase { + private final CsvTestCase testCase; + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + List tests = new ArrayList<>(); + tests.addAll(readScriptSpec("/ogc/ogc.csv-spec", parser)); + tests.addAll(readScriptSpec("/geo/geosql.csv-spec", parser)); + tests.addAll(readScriptSpec("/docs/geo.csv-spec", parser)); + return tests; + } + + public GeoCsvSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber); + this.testCase = testCase; + } + + + @Before + public void setupTestGeoDataIfNeeded() throws Exception { + if (client().performRequest(new Request("HEAD", "/ogc")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadOGCDatasetIntoEs(client(), "ogc"); + } + if (client().performRequest(new Request("HEAD", "/geo")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadGeoDatasetIntoEs(client(), "geo"); + } + } + + @Override + protected final void doTest() throws Throwable { + try (Connection csv = csvConnection(testCase); + Connection es = esJdbc()) { + + // pass the testName as table for debugging purposes (in case the underlying reader is missing) + ResultSet expected = executeCsvQuery(csv, testName); + ResultSet elasticResults = executeJdbcQuery(es, testCase.query); + assertResults(expected, elasticResults); + } + } + + // make sure ES uses UTC (otherwise JDBC driver picks up the JVM timezone per spec/convention) + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } + +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java new file mode 100644 index 00000000000..40e8f64be87 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.qa.geo; + +import org.apache.http.HttpHost; +import org.apache.http.HttpStatus; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.sql.qa.jdbc.SqlSpecTestCase; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Map; + +import static org.elasticsearch.xpack.sql.qa.jdbc.DataLoader.createString; +import static org.elasticsearch.xpack.sql.qa.jdbc.DataLoader.readFromJarUrl; + +public class GeoDataLoader { + + public static void main(String[] args) throws Exception { + try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) { + loadOGCDatasetIntoEs(client, "ogc"); + loadGeoDatasetIntoEs(client, "geo"); + Loggers.getLogger(GeoDataLoader.class).info("Geo data loaded"); + } + } + + protected static void loadOGCDatasetIntoEs(RestClient client, String index) throws Exception { + createIndex(client, index, createOGCIndexRequest()); + loadData(client, index, readResource("/ogc/ogc.json")); + makeFilteredAlias(client, "lakes", index, "\"term\" : { \"ogc_type\" : \"lakes\" }"); + makeFilteredAlias(client, "road_segments", index, "\"term\" : { \"ogc_type\" : \"road_segments\" }"); + makeFilteredAlias(client, "divided_routes", index, "\"term\" : { \"ogc_type\" : \"divided_routes\" }"); + makeFilteredAlias(client, "forests", index, "\"term\" : { \"ogc_type\" : \"forests\" }"); + makeFilteredAlias(client, "bridges", index, "\"term\" : { \"ogc_type\" : \"bridges\" }"); + makeFilteredAlias(client, "streams", index, "\"term\" : { \"ogc_type\" : \"streams\" }"); + makeFilteredAlias(client, "buildings", index, "\"term\" : { \"ogc_type\" : \"buildings\" }"); + makeFilteredAlias(client, "ponds", index, "\"term\" : { \"ogc_type\" : \"ponds\" }"); + makeFilteredAlias(client, "named_places", index, "\"term\" : { \"ogc_type\" : \"named_places\" }"); + makeFilteredAlias(client, "map_neatlines", index, "\"term\" : { \"ogc_type\" : \"map_neatlines\" }"); + } + + private static String createOGCIndexRequest() throws Exception { + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("properties"); + { + // Common + createIndex.startObject("ogc_type").field("type", "keyword").endObject(); + createIndex.startObject("fid").field("type", "integer").endObject(); + createString("name", createIndex); + + // Type specific + createIndex.startObject("shore").field("type", "geo_shape").endObject(); // lakes + + createString("aliases", createIndex); // road_segments + createIndex.startObject("num_lanes").field("type", "integer").endObject(); // road_segments, divided_routes + createIndex.startObject("centerline").field("type", "geo_shape").endObject(); // road_segments, streams + + 
createIndex.startObject("centerlines").field("type", "geo_shape").endObject(); // divided_routes + + createIndex.startObject("boundary").field("type", "geo_shape").endObject(); // forests, named_places + + createIndex.startObject("position").field("type", "geo_shape").endObject(); // bridges, buildings + + createString("address", createIndex); // buildings + createIndex.startObject("footprint").field("type", "geo_shape").endObject(); // buildings + + createIndex.startObject("type").field("type", "keyword").endObject(); // ponds + createIndex.startObject("shores").field("type", "geo_shape").endObject(); // ponds + + createIndex.startObject("neatline").field("type", "geo_shape").endObject(); // map_neatlines + + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + return Strings.toString(createIndex); + } + + private static void createIndex(RestClient client, String index, String settingsMappings) throws IOException { + Request createIndexRequest = new Request("PUT", "/" + index); + createIndexRequest.setEntity(new StringEntity(settingsMappings, ContentType.APPLICATION_JSON)); + client.performRequest(createIndexRequest); + } + + static void loadGeoDatasetIntoEs(RestClient client, String index) throws Exception { + createIndex(client, index, readResource("/geo/geosql.json")); + loadData(client, index, readResource("/geo/geosql-bulk.json")); + } + + private static void loadData(RestClient client, String index, String bulk) throws IOException { + Request request = new Request("POST", "/" + index + "/_bulk"); + request.addParameter("refresh", "true"); + request.setJsonEntity(bulk); + Response response = client.performRequest(request); + + if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { + throw new RuntimeException("Cannot load data " + response.getStatusLine()); + } + + String bulkResponseStr = EntityUtils.toString(response.getEntity()); + Map bulkResponseMap = XContentHelper.convertToMap(JsonXContent.jsonXContent, bulkResponseStr, false); + + if ((boolean) bulkResponseMap.get("errors")) { + throw new RuntimeException("Failed to load bulk data " + bulkResponseStr); + } + } + + + public static void makeFilteredAlias(RestClient client, String aliasName, String index, String filter) throws Exception { + Request request = new Request("POST", "/" + index + "/_alias/" + aliasName); + request.setJsonEntity("{\"filter\" : { " + filter + " } }"); + client.performRequest(request); + } + + private static String readResource(String location) throws IOException { + URL dataSet = SqlSpecTestCase.class.getResource(location); + if (dataSet == null) { + throw new IllegalArgumentException("Can't find [" + location + "]"); + } + StringBuilder builder = new StringBuilder(); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(readFromJarUrl(dataSet), StandardCharsets.UTF_8))) { + String line = reader.readLine(); + while(line != null) { + if (line.trim().startsWith("//") == false) { + builder.append(line); + builder.append('\n'); + } + line = reader.readLine(); + } + return builder.toString(); + } + } + +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java new file mode 100644 index 00000000000..405efac5cac --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.qa.geo; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.client.Request; +import org.elasticsearch.xpack.sql.qa.jdbc.LocalH2; +import org.elasticsearch.xpack.sql.qa.jdbc.SpecBaseIntegrationTestCase; +import org.elasticsearch.xpack.sql.jdbc.JdbcConfiguration; +import org.h2gis.functions.factory.H2GISFunctions; +import org.junit.Before; +import org.junit.ClassRule; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.text.NumberFormat; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Properties; + +/** + * Tests comparing geo sql queries executed against our jdbc client + * with those executed against H2GIS's jdbc client. + */ +public abstract class GeoSqlSpecTestCase extends SpecBaseIntegrationTestCase { + private String query; + + @ClassRule + public static LocalH2 H2 = new LocalH2((c) -> { + // Load GIS extensions + H2GISFunctions.load(c); + c.createStatement().execute("RUNSCRIPT FROM 'classpath:/ogc/sqltsch.sql'"); + c.createStatement().execute("RUNSCRIPT FROM 'classpath:/geo/setup_test_geo.sql'"); + }); + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = new SqlSpecParser(); + List tests = new ArrayList<>(); + tests.addAll(readScriptSpec("/ogc/ogc.sql-spec", parser)); + tests.addAll(readScriptSpec("/geo/geosql.sql-spec", parser)); + return tests; + } + + @Before + public void setupTestGeoDataIfNeeded() throws Exception { + assumeTrue("Cannot support locales that don't use Hindu-Arabic numerals and non-ascii - sign due to H2", + "-42".equals(NumberFormat.getInstance(Locale.getDefault()).format(-42))); + if (client().performRequest(new Request("HEAD", "/ogc")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadOGCDatasetIntoEs(client(), "ogc"); + } + if (client().performRequest(new Request("HEAD", "/geo")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadGeoDatasetIntoEs(client(), "geo"); + } + } + + + private static class SqlSpecParser implements Parser { + @Override + public Object parse(String line) { + return line.endsWith(";") ? 
line.substring(0, line.length() - 1) : line; + } + } + + public GeoSqlSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber); + this.query = query; + } + + @Override + protected final void doTest() throws Throwable { + try (Connection h2 = H2.get(); + Connection es = esJdbc()) { + + ResultSet expected, elasticResults; + expected = executeJdbcQuery(h2, query); + elasticResults = executeJdbcQuery(es, query); + + assertResults(expected, elasticResults); + } + } + + // TODO: use UTC for now until deciding on a strategy for handling date extraction + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java index 6376bd13308..daa4e5b4d0c 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java @@ -46,7 +46,7 @@ public final class CsvTestUtils { */ public static ResultSet executeCsvQuery(Connection csv, String csvTableName) throws SQLException { ResultSet expected = csv.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY) - .executeQuery("SELECT * FROM " + csvTableName); + .executeQuery("SELECT * FROM " + csvTableName); // trigger data loading for type inference expected.beforeFirst(); return expected; @@ -187,13 +187,13 @@ public final class CsvTestUtils { } else { if (line.endsWith(";")) { - // pick up the query - testCase = new CsvTestCase(); - query.append(line.substring(0, line.length() - 1).trim()); - testCase.query = query.toString(); - testCase.earlySchema = earlySchema.toString(); - earlySchema.setLength(0); - query.setLength(0); + // pick up the query + testCase = new CsvTestCase(); + query.append(line.substring(0, line.length() - 1).trim()); + testCase.query = query.toString(); + testCase.earlySchema = earlySchema.toString(); + earlySchema.setLength(0); + query.setLength(0); } // keep reading the query else { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java index b12203294c1..8dca83fa759 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java @@ -57,7 +57,7 @@ public class DataLoader { makeAlias(client, "employees", "emp"); } - private static void createString(String name, XContentBuilder builder) throws Exception { + public static void createString(String name, XContentBuilder builder) throws Exception { builder.startObject(name).field("type", "text") .startObject("fields") .startObject("keyword").field("type", "keyword").endObject() @@ -286,7 +286,7 @@ public class DataLoader { Response response = client.performRequest(request); } - protected static void makeAlias(RestClient client, String aliasName, String... indices) throws Exception { + public static void makeAlias(RestClient client, String aliasName, String... 
indices) throws Exception { for (String index : indices) { client.performRequest(new Request("POST", "/" + index + "/_alias/" + aliasName)); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java index 8931fe0264e..76894fc5a53 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java @@ -8,18 +8,25 @@ package org.elasticsearch.xpack.sql.qa.jdbc; import com.carrotsearch.hppc.IntObjectHashMap; import org.apache.logging.log4j.Logger; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.jdbc.EsType; import org.elasticsearch.xpack.sql.proto.StringUtils; import org.relique.jdbc.csv.CsvResultSet; +import java.io.IOException; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Types; +import java.text.ParseException; import java.time.temporal.TemporalAmount; import java.util.ArrayList; +import java.util.Calendar; import java.util.List; import java.util.Locale; +import java.util.TimeZone; import static java.lang.String.format; import static java.sql.Types.BIGINT; @@ -29,6 +36,8 @@ import static java.sql.Types.INTEGER; import static java.sql.Types.REAL; import static java.sql.Types.SMALLINT; import static java.sql.Types.TINYINT; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; @@ -38,6 +47,7 @@ import static org.junit.Assert.fail; * Utility class for doing JUnit-style asserts over JDBC. */ public class JdbcAssert { + private static final Calendar UTC_CALENDAR = Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.ROOT); private static final IntObjectHashMap SQL_TO_TYPE = new IntObjectHashMap<>(); @@ -139,6 +149,11 @@ public class JdbcAssert { expectedType = Types.TIMESTAMP; } + // H2 treats GEOMETRY as OTHER + if (expectedType == Types.OTHER && nameOf(actualType).startsWith("GEO_") ) { + actualType = Types.OTHER; + } + // since csv doesn't support real, we use float instead..... if (expectedType == Types.FLOAT && expected instanceof CsvResultSet) { expectedType = Types.REAL; @@ -251,6 +266,24 @@ public class JdbcAssert { assertEquals(msg, (double) expectedObject, (double) actualObject, lenientFloatingNumbers ? 1d : 0.0d); } else if (type == Types.FLOAT) { assertEquals(msg, (float) expectedObject, (float) actualObject, lenientFloatingNumbers ? 
1f : 0.0f);
+        } else if (type == Types.OTHER) {
+            if (actualObject instanceof Geometry) {
+                // We need to convert the expected object to libs/geo Geometry for comparison
+                try {
+                    expectedObject = WellKnownText.fromWKT(expectedObject.toString());
+                } catch (IOException | ParseException ex) {
+                    fail(ex.getMessage());
+                }
+            }
+            if (actualObject instanceof Point) {
+                // geo points are loaded from doc values where they are stored as long-encoded values, leading
+                // to a loss of precision
+                assertThat(expectedObject, instanceOf(Point.class));
+                assertEquals(((Point) expectedObject).getLat(), ((Point) actualObject).getLat(), 0.000001d);
+                assertEquals(((Point) expectedObject).getLon(), ((Point) actualObject).getLon(), 0.000001d);
+            } else {
+                assertEquals(msg, expectedObject, actualObject);
+            }
         }
         // intervals
         else if (type == Types.VARCHAR && actualObject instanceof TemporalAmount) {
diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java
index e6295985cf5..2f3ce7eaddd 100644
--- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java
+++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java
@@ -81,4 +81,4 @@ public class LocalH2 extends ExternalResource implements CheckedSupplier 0 ORDER BY "city";
+
+ city:s | location_wkt:s | region:s
+Hong Kong |point (114.18392493389547 22.28139698971063)|Asia
+;
+
+selectAllPointsOrderByLonFromAsWKT
+SELECT city, SUBSTRING(ST_ASWKT(location), 8, LOCATE(' ', ST_ASWKT(location), 8) - 8) lon FROM "geo" ORDER BY lon;
+
+ city:s | lon:s
+London |-0.12167204171419144
+Phoenix |-111.97350500151515
+Mountain View |-122.08384302444756
+San Francisco |-122.39422800019383
+New York |-73.9900270756334
+Chicago |-87.63787407428026
+Singapore |103.8555349688977
+Munich |11.537504978477955
+Hong Kong |114.18392493389547
+Seoul |127.06085099838674
+Berlin |13.390888944268227
+Tokyo |139.76402222178876
+Sydney |151.20862897485495
+Paris |2.3517729341983795
+Amsterdam |4.850311987102032
+;
+
+selectAllPointsGroupByHemisphereFromAsWKT
+SELECT COUNT(city) count, CAST(SUBSTRING(ST_ASWKT(location), 8, 1) = '-' AS STRING) west FROM "geo" GROUP BY west ORDER BY west;
+
+ count:l | west:s
+9 |false
+6 |true
+;
+
+selectRegionUsingWktToSql
+SELECT region, city, ST_ASWKT(ST_WKTTOSQL(region_point)) region_wkt FROM geo ORDER BY region, city;
+
+ region:s | city:s | region_wkt:s
+Americas |Chicago |point (-105.2551 54.526)
+Americas |Mountain View |point (-105.2551 54.526)
+Americas |New York |point (-105.2551 54.526)
+Americas |Phoenix |point (-105.2551 54.526)
+Americas |San Francisco |point (-105.2551 54.526)
+Asia |Hong Kong |point (100.6197 34.0479)
+Asia |Seoul |point (100.6197 34.0479)
+Asia |Singapore |point (100.6197 34.0479)
+Asia |Sydney |point (100.6197 34.0479)
+Asia |Tokyo |point (100.6197 34.0479)
+Europe |Amsterdam |point (15.2551 54.526)
+Europe |Berlin |point (15.2551 54.526)
+Europe |London |point (15.2551 54.526)
+Europe |Munich |point (15.2551 54.526)
+Europe |Paris |point (15.2551 54.526)
+;
+
+selectCitiesWithAGroupByWktToSql
+SELECT COUNT(city) city_by_region, CAST(ST_WKTTOSQL(region_point) AS STRING) region FROM geo WHERE city LIKE '%a%' GROUP BY ST_WKTTOSQL(region_point) ORDER BY ST_WKTTOSQL(region_point);
+
+ city_by_region:l | region:s
+3 |point (-105.2551 54.526)
+1 |point (100.6197 34.0479)
+2 |point (15.2551 54.526)
+;
+
+selectCitiesWithEOrderByWktToSql
+SELECT region,
city FROM geo WHERE city LIKE '%e%' ORDER BY ST_WKTTOSQL(region_point), city; + + region:s | city:s +Americas |Mountain View +Americas |New York +Americas |Phoenix +Asia |Seoul +Asia |Singapore +Asia |Sydney +Europe |Amsterdam +Europe |Berlin +; + + +selectCitiesByDistance +SELECT region, city, ST_Distance(location, ST_WktToSQL('POINT (-71 42)')) distance FROM geo WHERE distance < 5000000 ORDER BY region, city; + + region:s | city:s | distance:d +Americas |Chicago |1373941.5140200066 +Americas |Mountain View |4335936.909375596 +Americas |New York |285839.6579622518 +Americas |Phoenix |3692895.0346903414 +Americas |San Francisco |4343565.010996301 +; + +selectCitiesByDistanceFloored +SELECT region, city, FLOOR(ST_Distance(location, ST_WktToSQL('POINT (-71 42)'))) distance FROM geo WHERE distance < 5000000 ORDER BY region, city; + + region:s | city:s | distance:l +Americas |Chicago |1373941 +Americas |Mountain View |4335936 +Americas |New York |285839 +Americas |Phoenix |3692895 +Americas |San Francisco |4343565 +; + +selectCitiesOrderByDistance +SELECT region, city FROM geo ORDER BY ST_Distance(location, ST_WktToSQL('POINT (-71 42)')) ; + + region:s | city:s +Americas |New York +Americas |Chicago +Americas |Phoenix +Americas |Mountain View +Americas |San Francisco +Europe |London +Europe |Paris +Europe |Amsterdam +Europe |Berlin +Europe |Munich +Asia |Tokyo +Asia |Seoul +Asia |Hong Kong +Asia |Singapore +Asia |Sydney +; + +groupCitiesByDistance +SELECT COUNT(*) count, FIRST(region) region FROM geo GROUP BY FLOOR(ST_Distance(location, ST_WktToSQL('POINT (-71 42)'))/5000000); + + count:l | region:s +5 |Americas +5 |Europe +3 |Asia +2 |Asia +; + +selectWktToSqlOfNull +SELECT ST_ASWKT(ST_WktToSql(NULL)) shape; + shape:s +null +; + +selectWktToSqlOfNull +SELECT ST_Distance(ST_WktToSql(NULL), ST_WktToSQL('POINT (-71 42)')) shape; + shape:d +null +; + +groupByGeometryType +SELECT COUNT(*) cnt, ST_GeometryType(location) gt FROM geo GROUP BY ST_GeometryType(location); + + cnt:l | gt:s +15 |POINT +; + + +groupAndOrderByGeometryType +SELECT COUNT(*) cnt, ST_GeometryType(location) gt FROM geo GROUP BY gt ORDER BY gt; + + cnt:l | gt:s +15 |POINT +; + +groupByEastWest +SELECT COUNT(*) cnt, FLOOR(ST_X(location)/90) east FROM geo GROUP BY east ORDER BY east; + + cnt:l | east:l +3 |-2 +3 |-1 +4 |0 +5 |1 +; + +groupByNorthSouth +SELECT COUNT(*) cnt, FLOOR(ST_Y(location)/45) north FROM geo GROUP BY north ORDER BY north; + + cnt:l | north:l +1 |-1 +9 |0 +5 |1 +; + +groupByNorthEastSortByEastNorth +SELECT COUNT(*) cnt, FLOOR(ST_Y(location)/45) north, FLOOR(ST_X(location)/90) east FROM geo GROUP BY north, east ORDER BY east, north; + + cnt:l | north:l | east:l +3 |0 |-2 +2 |0 |-1 +1 |1 |-1 +4 |1 |0 +1 |-1 |1 +4 |0 |1 +; + +selectFilterByXOfLocation +SELECT city, ST_X(shape) x, ST_Y(shape) y, ST_Z(shape) z, ST_X(location) lx, ST_Y(location) ly FROM geo WHERE lx > 0 ORDER BY ly; + + city:s | x:d | y:d | z:d | lx:d | ly:d +Sydney |151.208629 |-33.863385 |100.0 |151.20862897485495|-33.863385021686554 +Singapore |103.855535 |1.295868 |15.0 |103.8555349688977 |1.2958679627627134 +Hong Kong |114.183925 |22.281397 |552.0 |114.18392493389547|22.28139698971063 +Tokyo |139.76402225 |35.669616 |40.0 |139.76402222178876|35.66961596254259 +Seoul |127.060851 |37.509132 |38.0 |127.06085099838674|37.50913198571652 +Munich |11.537505 |48.146321 |519.0 |11.537504978477955|48.14632098656148 +Paris |2.351773 |48.845538 |35.0 |2.3517729341983795|48.84553796611726 +Amsterdam |4.850312 |52.347557 |2.0 |4.850311987102032 
|52.347556999884546 +Berlin |13.390889 |52.486701 |34.0 |13.390888944268227|52.48670099303126 +; + +selectFilterByRegionPoint +SELECT city, region, ST_X(location) x FROM geo WHERE ST_X(ST_WKTTOSQL(region_point)) < 0 ORDER BY x; + + city:s | region:s | x:d +San Francisco |Americas |-122.39422800019383 +Mountain View |Americas |-122.08384302444756 +Phoenix |Americas |-111.97350500151515 +Chicago |Americas |-87.63787407428026 +New York |Americas |-73.9900270756334 +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json new file mode 100644 index 00000000000..56007a0284c --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json @@ -0,0 +1,28 @@ +{ + "settings": { + "number_of_shards": 1 + }, + "mappings": { + "properties": { + "region": { + "type": "keyword" + }, + "city": { + "type": "keyword" + }, + "location": { + "type": "geo_point" + }, + "location_no_dv": { + "type": "geo_point", + "doc_values": "false" + }, + "shape": { + "type": "geo_shape" + }, + "region_point": { + "type": "keyword" + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec new file mode 100644 index 00000000000..e801d8477f6 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec @@ -0,0 +1,24 @@ +// +// Commands on geo test data +// + +selectAllShapesAsGeometries +SELECT city, shape, region FROM "geo" ORDER BY "city"; + +selectAllShapesAsWKT +SELECT city, ST_GEOMFROMTEXT(ST_ASWKT(shape)) shape_wkt, region FROM "geo" ORDER BY "city"; + +selectAllPointsAsGeometries +SELECT city, location, region FROM "geo" ORDER BY "city"; + +selectAllPointsAsWKT +SELECT city, ST_GEOMFROMTEXT(ST_ASWKT(location)) shape_wkt, region FROM "geo" ORDER BY "city"; + +selectRegionUsingWktToSqlWithoutConvertion +SELECT region, city, shape, ST_GEOMFROMTEXT(region_point) region_wkt FROM geo ORDER BY region, city; + +selectCitiesWithGroupByWktToSql +SELECT COUNT(city) city_by_region, ST_GEOMFROMTEXT(region_point) region_geom FROM geo WHERE city LIKE '%a%' GROUP BY region_geom ORDER BY city_by_region; + +selectCitiesWithOrderByWktToSql +SELECT region, city, UCASE(ST_ASWKT(ST_GEOMFROMTEXT(region_point))) region_wkt FROM geo WHERE city LIKE '%e%' ORDER BY region_wkt, city; diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql b/x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql new file mode 100644 index 00000000000..b8b8d4e36f4 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS "geo"; +CREATE TABLE "geo" ( + "city" VARCHAR(50), + "region" VARCHAR(50), + "region_point" VARCHAR(50), + "location" POINT, + "shape" GEOMETRY +) + AS SELECT * FROM CSVREAD('classpath:/geo/geo.csv'); diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt b/x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt new file mode 100644 index 00000000000..ac061f5cc44 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt @@ -0,0 +1,41 @@ +Software Notice + +This OGC work (including software, documents, or other related items) is being +provided by the copyright holders under the following license. 
By obtaining,
+using and/or copying this work, you (the licensee) agree that you have read,
+understood, and will comply with the following terms and conditions:
+
+Permission to use, copy, and modify this software and its documentation, with
+or without modification, for any purpose and without fee or royalty is hereby
+granted, provided that you include the following on ALL copies of the software
+and documentation or portions thereof, including modifications, that you make:
+
+1. The full text of this NOTICE in a location viewable to users of the
+redistributed or derivative work.
+
+2. Any pre-existing intellectual property disclaimers, notices, or terms and
+conditions. If none exist, a short notice of the following form (hypertext is
+preferred, text is permitted) should be used within the body of any
+redistributed or derivative code: "Copyright © [$date-of-document] Open
+Geospatial Consortium, Inc. All Rights Reserved.
+http://www.opengeospatial.org/ogc/legal (Hypertext is preferred, but a textual
+representation is permitted.)
+
+3. Notice of any changes or modifications to the OGC files, including the date
+changes were made. (We recommend you provide URIs to the location from which
+the code is derived.)
+
+
+THIS SOFTWARE AND DOCUMENTATION IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE
+NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO, WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT
+THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY THIRD PARTY
+PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
+
+COPYRIGHT HOLDERS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE OF THE SOFTWARE OR DOCUMENTATION.
+
+The name and trademarks of copyright holders may NOT be used in advertising or
+publicity pertaining to the software without specific, written prior permission.
+Title to copyright in this software and any associated documentation will at all
+times remain with copyright holders.
\ No newline at end of file
diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec
new file mode 100644
index 00000000000..98176c849f3
--- /dev/null
+++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec
@@ -0,0 +1,36 @@
+//
+// Commands on OGC data
+//
+
+showTables
+SHOW TABLES "ogc";
+
+     name:s     |    type:s
+ogc             |BASE TABLE
+;
+
+// DESCRIBE
+
+describe
+DESCRIBE "ogc";
+
+    column:s    |    type:s   |   mapping:s
+address         | VARCHAR     | text
+address.keyword | VARCHAR     | keyword
+aliases         | VARCHAR     | text
+aliases.keyword | VARCHAR     | keyword
+boundary        | GEOMETRY    | geo_shape
+centerline      | GEOMETRY    | geo_shape
+centerlines     | GEOMETRY    | geo_shape
+fid             | INTEGER     | integer
+footprint       | GEOMETRY    | geo_shape
+name            | VARCHAR     | text
+name.keyword    | VARCHAR     | keyword
+neatline        | GEOMETRY    | geo_shape
+num_lanes       | INTEGER     | integer
+ogc_type        | VARCHAR     | keyword
+position        | GEOMETRY    | geo_shape
+shore           | GEOMETRY    | geo_shape
+shores          | GEOMETRY    | geo_shape
+type            | VARCHAR     | keyword
+;
diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json
new file mode 100644
index 00000000000..afdf2f5d61a
--- /dev/null
+++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json
@@ -0,0 +1,58 @@
+// This dataset is derived from OpenGIS Simple Features for SQL (Types and Functions) Test Suite on Apr 1, 2018
+//
+// Copyright © 2018 Open Geospatial Consortium, Inc. All Rights Reserved.
+// http://www.opengeospatial.org/ogc/legal
+//
+// lakes
+{"index":{"_id": "101"}}
+{"ogc_type":"lakes", "fid": 101, "name": "BLUE LAKE", "shore": "POLYGON ((52 18, 66 23, 73 9, 48 6, 52 18), (59 18, 67 18, 67 13, 59 13, 59 18))"}
+//
+// road segments
+{"index":{"_id": "102"}}
+{"ogc_type":"road_segments", "fid": 102, "name": "Route 5", "num_lanes": 2, "centerline": "LINESTRING (0 18, 10 21, 16 23, 28 26, 44 31)"}
+{"index":{"_id": "103"}}
+{"ogc_type":"road_segments", "fid": 103, "name": "Route 5", "aliases": "Main Street", "num_lanes": 4, "centerline": "LINESTRING (44 31, 56 34, 70 38)"}
+{"index":{"_id": "104"}}
+{"ogc_type":"road_segments", "fid": 104, "name": "Route 5", "num_lanes": 2, "centerline": "LINESTRING (70 38, 72 48)"}
+{"index":{"_id": "105"}}
+{"ogc_type":"road_segments", "fid": 105, "name": "Main Street", "num_lanes": 4, "centerline": "LINESTRING (70 38, 84 42)"}
+{"index":{"_id": "106"}}
+{"ogc_type":"road_segments", "fid": 106, "name": "Dirt Road by Green Forest", "num_lanes": 1, "centerline": "LINESTRING (28 26, 28 0)"}
+//
+// divided routes
+{"index":{"_id": "119"}}
+{"ogc_type":"divided_routes", "fid": 119, "name": "Route 75", "num_lanes": 4, "centerlines": "MULTILINESTRING ((10 48, 10 21, 10 0), (16 0, 16 23, 16 48))"}
+//
+// forests
+{"index":{"_id": "109"}}
+{"ogc_type":"forests", "fid": 109, "name": "Green Forest", "boundary": "MULTIPOLYGON (((28 26, 28 0, 84 0, 84 42, 28 26), (52 18, 66 23, 73 9, 48 6, 52 18)), ((59 18, 67 18, 67 13, 59 13, 59 18)))"}
+//
+// bridges
+{"index":{"_id": "110"}}
+{"ogc_type":"bridges", "fid": 110, "name": "Cam Bridge", "position": "POINT (44 31)"}
+//
+// streams
+{"index":{"_id": "111"}}
+{"ogc_type":"streams", "fid": 111, "name": "Cam Stream", "centerline": "LINESTRING (38 48, 44 41, 41 36, 44 31, 52 18)"}
+{"index":{"_id": "112"}}
+{"ogc_type":"streams", "fid": 112, "centerline": "LINESTRING (76 0, 78 4, 73 9)"}
+//
+// buildings
+{"index":{"_id": "113"}}
+{"ogc_type":"buildings", "fid": 113, "address": "123 Main Street", "position": "POINT (52 30)", "footprint": "POLYGON ((50 31, 54 31, 54 29, 50 29, 50 31))"}
+{"index":{"_id": "114"}}
+{"ogc_type":"buildings", "fid": 114, "address": "215 Main Street", "position": "POINT (64 33)", "footprint": "POLYGON ((66 34, 62 34, 62 32, 66 32, 66 34))"}
+//
+// ponds
+{"index":{"_id": "120"}}
+{"ogc_type":"ponds", "fid": 120, "type": "Stock Pond", "shores": "MULTIPOLYGON (((24 44, 22 42, 24 40, 24 44)), ((26 44, 26 40, 28 42, 26 44)))"}
+//
+// named places
+{"index":{"_id": "117"}}
+{"ogc_type":"named_places", "fid": 117, "name": "Ashton", "boundary": "POLYGON ((62 48, 84 48, 84 30, 56 30, 56 34, 62 48))"}
+{"index":{"_id": "118"}}
+{"ogc_type":"named_places", "fid": 118, "name": "Goose Island", "boundary": "POLYGON ((67 13, 67 18, 59 18, 59 13, 67 13))"}
+//
+// map neat lines
+{"index":{"_id": "115"}}
+{"ogc_type":"map_neatlines", "fid": 115, "neatline": "POLYGON ((0 0, 0 48, 84 48, 84 0, 0 0))"}
diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec
new file mode 100644
index 00000000000..3976c5a8b18
--- /dev/null
+++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec
@@ -0,0 +1,85 @@
+//
+// Basic GEO SELECT
+//
+
+selectLakes
+SELECT fid, name, shore FROM lakes ORDER BY fid;
+selectRoadSegments
+SELECT fid, name, num_lanes, aliases, centerline FROM road_segments ORDER BY fid;
+selectDividedRoutes
+SELECT fid, name, num_lanes, centerlines FROM divided_routes ORDER BY fid;
+selectForests
+SELECT fid,
name, boundary FROM forests ORDER BY fid; +selectBridges +SELECT fid, name, position FROM bridges ORDER BY fid; +selectStreams +SELECT fid, name, centerline FROM streams ORDER BY fid; +selectBuildings +SELECT fid, address, position, footprint FROM buildings ORDER BY fid; +selectPonds +SELECT fid, type, name, shores FROM ponds ORDER BY fid; +selectNamedPlaces +SELECT fid, name, boundary FROM named_places ORDER BY fid; +selectMapNeatLines +SELECT fid, neatline FROM map_neatlines ORDER BY fid; + +// +// Type conversion functions +// + +// The string serialization is slightly different between ES and H2, so we need to tweak it a bit by uppercasing both +// and removing floating point +selectRoadSegmentsAsWkt +SELECT fid, name, num_lanes, aliases, REPLACE(UCASE(ST_AsText(centerline)), '.0', '') centerline_wkt FROM road_segments ORDER BY fid; + +selectSinglePoint +SELECT ST_GeomFromText('point (10.0 12.0)') point; + + +// +// Geometry Property Functions +// +// H2GIS doesn't follow the standard here that mandates ST_Dimension returns SMALLINT +selectLakesProps +SELECT fid, UCASE(ST_GeometryType(shore)) type FROM lakes ORDER BY fid; +selectRoadSegmentsProps +SELECT fid, UCASE(ST_GeometryType(centerline)) type FROM road_segments ORDER BY fid; +selectDividedRoutesProps +SELECT fid, UCASE(ST_GeometryType(centerlines)) type FROM divided_routes ORDER BY fid; +selectForestsProps +SELECT fid, UCASE(ST_GeometryType(boundary)) type FROM forests ORDER BY fid; +selectBridgesProps +SELECT fid, UCASE(ST_GeometryType(position)) type FROM bridges ORDER BY fid; +selectStreamsProps +SELECT fid, UCASE(ST_GeometryType(centerline)) type FROM streams ORDER BY fid; +selectBuildingsProps +SELECT fid, UCASE(ST_GeometryType(position)) type1, UCASE(ST_GeometryType(footprint)) type2 FROM buildings ORDER BY fid; +selectPondsProps +SELECT fid, UCASE(ST_GeometryType(shores)) type FROM ponds ORDER BY fid; +selectNamedPlacesProps +SELECT fid, UCASE(ST_GeometryType(boundary)) type FROM named_places ORDER BY fid; +selectMapNeatLinesProps +SELECT fid, UCASE(ST_GeometryType(neatline)) type FROM map_neatlines ORDER BY fid; + +selectLakesXY +SELECT fid, ST_X(shore) x, ST_Y(shore) y FROM lakes ORDER BY fid; +selectRoadSegmentsXY +SELECT fid, ST_X(centerline) x, ST_Y(centerline) y FROM road_segments ORDER BY fid; +selectDividedRoutesXY +SELECT fid, ST_X(centerlines) x, ST_Y(centerlines) y FROM divided_routes ORDER BY fid; +selectForestsXY +SELECT fid, ST_X(boundary) x, ST_Y(boundary) y FROM forests ORDER BY fid; +selectBridgesPositionsXY +SELECT fid, ST_X(position) x, ST_Y(position) y FROM bridges ORDER BY fid; +selectStreamsXY +SELECT fid, ST_X(centerline) x, ST_Y(centerline) y FROM streams ORDER BY fid; +selectBuildingsXY +SELECT fid, ST_X(position) x, ST_Y(position) y FROM buildings ORDER BY fid; +selectBuildingsFootprintsXY +SELECT fid, ST_X(footprint) x, ST_Y(footprint) y FROM buildings ORDER BY fid; +selectPondsXY +SELECT fid, ST_X(shores) x, ST_Y(shores) y FROM ponds ORDER BY fid; +selectNamedPlacesXY +SELECT fid, ST_X(boundary) x, ST_Y(boundary) y FROM named_places ORDER BY fid; +selectMapNeatLinesXY +SELECT fid, ST_X(neatline) x, ST_Y(neatline) y FROM map_neatlines ORDER BY fid; diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql b/x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql new file mode 100644 index 00000000000..6d1322ecd36 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql @@ -0,0 +1,672 @@ +-- FILE: sqltsch.sql 10/01/98 +-- +-- 1 2 3 4 5 6 7 8 
+--345678901234567890123456789012345678901234567890123456789012345678901234567890 +--////////////////////////////////////////////////////////////////////////////// +-- +-- Copyright 1998, Open GIS Consortium, Inc. +-- +-- The material in this document details an Open GIS Consortium Test Suite in +-- accordance with a license that your organization has signed. Please refer +-- to http://www.opengeospatial.org/testing/ to obtain a copy of the general license +-- (it is part of the Conformance Testing Agreement). +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- OpenGIS Simple Features for SQL (Types and Functions) Test Suite Software +-- +-- This file "sqltsch.sql" is part 1 of a two part standardized test +-- suite in SQL script form. The other file that is required for this test +-- suite, "sqltque.sql", one additional script is provided ("sqltcle.sql") that +-- performs cleanup operations between test runs, and other documents that +-- describe the OGC Conformance Test Program are available via the WWW at +-- http://www.opengeospatial.org/testing/index.htm +-- +-- NOTE CONCERNING INFORMATION ON CONFORMANCE TESTING AND THIS TEST SUITE +-- ---------------------------------------------------------------------- +-- +-- Organizations wishing to submit product for conformance testing should +-- access the above WWW site to discover the proper procedure for obtaining +-- a license to use the OpenGIS(R) certification mark associated with this +-- test suite. +-- +-- +-- NOTE CONCERNING TEST SUITE ADAPTATION +-- ------------------------------------- +-- +-- OGC recognizes that many products will have to adapt this test suite to +-- make it work properly. OGC has documented the allowable adaptations within +-- this test suite where possible. Other information about adaptations may be +-- discovered in the Test Suite Guidelines document for this test suite. +-- +-- PLEASE NOTE THE OGC REQUIRES THAT ADAPTATIONS ARE FULLY DOCUMENTED USING +-- LIBERAL COMMENT BLOCKS CONFORMING TO THE FOLLOWING FORMAT: +-- +-- -- !#@ ADAPTATION BEGIN +-- explanatory text goes here +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- original sql goes here +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +-- adated sql goes here +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- BEGIN TEST SUITE CODE +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- Create the neccessary feature and geometry tables(views) and metadata tables +-- (views) to load and query the "Blue Lake" test data for OpenGIS Simple +-- Features for SQL (Types and Functions) test. +-- +-- Required feature tables (views) are: +-- Lakes +-- Road Segments +-- Divided Routes +-- Buildings +-- Forests +-- Bridges +-- Named Places +-- Streams +-- Ponds +-- Map Neatlines +-- +-- Please refer to the Test Suite Guidelines for this test suite for further +-- information concerning this test data. 
+-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- CREATE SPATIAL_REF_SYS METADATA TABLE +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- +-- *** ADAPTATION ALERT **** +-- Implementations do not need to execute this statement if they already +-- create the spatial_ref_sys table or view via another mechanism. +-- The size of the srtext VARCHAR exceeds that allowed on some systems. +-- +-- CREATE TABLE spatial_ref_sys ( +-- srid INTEGER NOT NULL PRIMARY KEY, +-- auth_name VARCHAR(256), +-- auth_srid INTEGER, +-- -- srtext VARCHAR(2048) +-- srtext VARCHAR(2000) +-- ); +-- -- +-- INSERT INTO spatial_ref_sys VALUES(101, 'POSC', 32214, +-- 'PROJCS["UTM_ZONE_14N", GEOGCS["World Geodetic System 72", +-- DATUM["WGS_72", SPHEROID["NWL_10D", 6378135, 298.26]], +-- PRIMEM["Greenwich", 0], UNIT["Meter", 1.0]], +-- PROJECTION["Transverse_Mercator"], +-- PARAMETER["False_Easting", 500000.0], +-- PARAMETER["False_Northing", 0.0], +-- PARAMETER["Central_Meridian", -99.0], +-- PARAMETER["Scale_Factor", 0.9996], +-- PARAMETER["Latitude_of_origin", 0.0], +-- UNIT["Meter", 1.0]]' +-- ); +-- +-- +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- CREATE FEATURE SCHEMA +-- +-- *** ADAPTATION ALERT *** +-- The following schema is created using CREATE TABLE statements. +-- Furthermore, it DOES NOT create the GEOMETRY_COLUMNS metadata table. +-- Implementer's should replace the CREATE TABLES below with the mechanism +-- that it uses to create feature tables and the GEOMETRY_COLUMNS table/view +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-------------------------------------------------------------------------------- +-- +-- Create feature tables +-- +-------------------------------------------------------------------------------- +-- +-- Lakes +-- +-- +-- +-- +CREATE TABLE lakes ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + shore POLYGON +); +-- +-- Road Segments +-- +-- +-- +-- +CREATE TABLE road_segments ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + aliases VARCHAR(64), + num_lanes INTEGER, + centerline LINESTRING +); +-- +-- Divided Routes +-- +-- +-- +-- +CREATE TABLE divided_routes ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + num_lanes INTEGER, + centerlines MULTILINESTRING +); +-- +-- Forests +-- +-- +-- +-- +CREATE TABLE forests ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + boundary MULTIPOLYGON +); +-- +-- Bridges +-- +-- +-- +-- +CREATE TABLE bridges ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + position POINT +); +-- +-- Streams +-- +-- +-- +-- +CREATE TABLE streams ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + centerline LINESTRING +); +-- +-- Buildings +-- +--*** ADAPTATION ALERT *** +-- A view could be used to provide the below semantics without multiple geometry +-- columns in a table. In other words, create two tables. One table would +-- contain the POINT position and the other would create the POLYGON footprint. +-- Then create a view with the semantics of the buildings table below. 
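+-- As an illustrative sketch only (the two base tables named here are
+-- hypothetical and not part of this test suite), such an adaptation might be:
+--     CREATE VIEW buildings AS
+--     SELECT p.fid, p.address, p.position, f.footprint
+--     FROM building_positions p JOIN building_footprints f ON p.fid = f.fid;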
+-- +-- +-- +CREATE TABLE buildings ( + fid INTEGER NOT NULL PRIMARY KEY, + address VARCHAR(64), + position POINT, + footprint POLYGON +); +-- +-- Ponds +-- +-- +-- +-- +-- -- !#@ ADAPTATION BEGIN +-- Fixes typo in the MULTIPOYLGON type +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- CREATE TABLE ponds ( +-- fid INTEGER NOT NULL PRIMARY KEY, +-- name VARCHAR(64), +-- type VARCHAR(64), +-- shores MULTIPOYLGON +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +CREATE TABLE ponds ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + type VARCHAR(64), + shores MULTIPOLYGON +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +-- Named Places +-- +-- +-- +-- +CREATE TABLE named_places ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + boundary POLYGON +); +-- +-- Map Neatline +-- +-- +-- +-- +CREATE TABLE map_neatlines ( + fid INTEGER NOT NULL PRIMARY KEY, + neatline POLYGON +); +-- +-- +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- POPULATE GEOMETRY AND FEATURE TABLES +-- +-- *** ADAPTATION ALERT *** +-- This script DOES NOT make any inserts into a GEOMTERY_COLUMNS table/view. +-- Implementers should insert whatever makes this happen in their implementation +-- below. Furthermore, the inserts below may be replaced by whatever mechanism +-- may be provided by implementers to insert rows in feature tables such that +-- metadata (and other mechanisms) are updated properly. +-- +--////////////////////////////////////////////////////////////////////////////// +-- +--============================================================================== +-- Lakes +-- +-- We have one lake, Blue Lake. It is a polygon with a hole. Its geometry is +-- described in WKT format as: +-- 'POLYGON( (52 18, 66 23, 73 9, 48 6, 52 18), +-- (59 18, 67 18, 67 13, 59 13, 59 18) )' +--============================================================================== +-- +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO lakes VALUES (101, 'BLUE LAKE', +-- PolygonFromText('POLYGON((52 18,66 23,73 9,48 6,52 18),(59 18,67 18,67 13,59 13,59 18))', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO lakes VALUES (101, 'BLUE LAKE', + ST_PolyFromText('POLYGON((52 18,66 23,73 9,48 6,52 18),(59 18,67 18,67 13,59 13,59 18))', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Road segments +-- +-- We have five road segments. Their geometries are all linestrings. 
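+-- (A LINESTRING is an ordered sequence of vertices; note that consecutive
+-- segments below share endpoints, e.g. (44 31) joins fid 102 and fid 103.)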
+-- The geometries are described in WKT format as: +-- name 'Route 5', fid 102 +-- 'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' +-- name 'Route 5', fid 103 +-- 'LINESTRING( 44 31, 56 34, 70 38 )' +-- name 'Route 5', fid 104 +-- 'LINESTRING( 70 38, 72 48 )' +-- name 'Main Street', fid 105 +-- 'LINESTRING( 70 38, 84 42 )' +-- name 'Dirt Road by Green Forest', fid 106 +-- 'LINESTRING( 28 26, 28 0 )' +-- +--================== +-- +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO road_segments VALUES(102, 'Route 5', NULL, 2, +-- LineStringFromText('LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(103, 'Route 5', 'Main Street', 4, +-- LineStringFromText('LINESTRING( 44 31, 56 34, 70 38 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(104, 'Route 5', NULL, 2, +-- LineStringFromText('LINESTRING( 70 38, 72 48 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(105, 'Main Street', NULL, 4, +-- LineStringFromText('LINESTRING( 70 38, 84 42 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(106, 'Dirt Road by Green Forest', NULL, 1, +-- LineStringFromText('LINESTRING( 28 26, 28 0 )',101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO road_segments VALUES(102, 'Route 5', NULL, 2, + ST_LineFromText('LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101) +); +INSERT INTO road_segments VALUES(103, 'Route 5', 'Main Street', 4, + ST_LineFromText('LINESTRING( 44 31, 56 34, 70 38 )' ,101) +); +INSERT INTO road_segments VALUES(104, 'Route 5', NULL, 2, + ST_LineFromText('LINESTRING( 70 38, 72 48 )' ,101) +); +INSERT INTO road_segments VALUES(105, 'Main Street', NULL, 4, + ST_LineFromText('LINESTRING( 70 38, 84 42 )' ,101) +); +INSERT INTO road_segments VALUES(106, 'Dirt Road by Green Forest', NULL, 1, + ST_LineFromText('LINESTRING( 28 26, 28 0 )',101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +--================== +-- DividedRoutes +-- +-- We have one divided route. Its geometry is a multilinestring. +-- The geometry is described in WKT format as: +-- 'MULTILINESTRING( (10 48, 10 21, 10 0), (16 0, 10 23, 16 48) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO divided_routes VALUES(119, 'Route 75', 4, +-- MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO divided_routes VALUES(119, 'Route 75', 4, + ST_MLineFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Forests +-- +-- We have one forest. Its geometry is a multipolygon. 
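+-- (A MULTIPOLYGON lists one or more polygons, each written as an outer ring
+-- optionally followed by inner rings; here the Blue Lake shore forms an
+-- inner ring of the first polygon.)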
+-- The geometry is described in WKT format as: +-- 'MULTIPOLYGON( ( (28 26, 28 0, 84 0, 84 42, 28 26), +-- (52 18, 66 23, 73 9, 48 6, 52 18) ), +-- ( (59 18, 67 18, 67 13, 59 13, 59 18) ) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO forests VALUES(109, 'Green Forest', +-- MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO forests VALUES(109, 'Green Forest', + ST_MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +--================== +-- Bridges +-- +-- We have one bridge. Its geometry is a point. +-- The geometry is described in WKT format as: +-- 'POINT( 44 31 )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO bridges VALUES(110, 'Cam Bridge', +-- PointFromText('POINT( 44 31 )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO bridges VALUES(110, 'Cam Bridge', + ST_PointFromText('POINT( 44 31 )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Streams +-- +-- We have two streams. Their geometries are linestrings. +-- The geometries are described in WKT format as: +-- 'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )' +-- 'LINESTRING( 76 0, 78 4, 73 9 )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO streams VALUES(111, 'Cam Stream', +-- LineStringFromText('LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101) +-- ); +-- INSERT INTO streams VALUES(112, NULL, +-- LineStringFromText('LINESTRING( 76 0, 78 4, 73 9 )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO streams VALUES(111, 'Cam Stream', + ST_LineFromText('LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101) +); +INSERT INTO streams VALUES(112, NULL, + ST_LineFromText('LINESTRING( 76 0, 78 4, 73 9 )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Buildings +-- +-- We have two buildings. Their geometries are points and polygons. 
+-- The geometries are described in WKT format as: +-- address '123 Main Street' fid 113 +-- 'POINT( 52 30 )' and +-- 'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )' +-- address '215 Main Street' fid 114 +-- 'POINT( 64 33 )' and +-- 'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO buildings VALUES(113, '123 Main Street', +-- PointFromText('POINT( 52 30 )', 101), +-- PolygonFromText('POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101) +-- ); +-- INSERT INTO buildings VALUES(114, '215 Main Street', +-- PointFromText('POINT( 64 33 )', 101), +-- PolygonFromText('POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO buildings VALUES(113, '123 Main Street', + ST_PointFromText('POINT( 52 30 )', 101), + ST_PolyFromText('POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101) +); +INSERT INTO buildings VALUES(114, '215 Main Street', + ST_PointFromText('POINT( 64 33 )', 101), + ST_PolyFromText('POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Ponds +-- +-- We have one pond. Its geometry is a multipolygon. +-- The geometry is described in WKT format as: +-- 'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), ( ( 26 44, 26 40, 28 42, 26 44) ) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO ponds VALUES(120, NULL, 'Stock Pond', +-- MultiPolygonFromText('MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO ponds VALUES(120, NULL, 'Stock Pond', + ST_MPolyFromText('MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +--================== +-- Named Places +-- +-- We have two named places. Their geometries are polygons. 
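+-- (Goose Island's boundary below traces the same ring that forms the island
+-- inside Blue Lake.)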
+-- The geometries are described in WKT format as: +-- name 'Ashton' fid 117 +-- 'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )' +-- address 'Goose Island' fid 118 +-- 'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO named_places VALUES(117, 'Ashton', +-- PolygonFromText('POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101) +-- ); +-- INSERT INTO named_places VALUES(118, 'Goose Island', +-- PolygonFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO named_places VALUES(117, 'Ashton', + ST_PolyFromText('POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101) +); +INSERT INTO named_places VALUES(118, 'Goose Island', + ST_PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Map Neatlines +-- +-- We have one map neatline. Its geometry is a polygon. +-- The geometry is described in WKT format as: +-- 'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO map_neatlines VALUES(115, +-- PolygonFromText('POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO map_neatlines VALUES(115, + ST_PolyFromText('POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +-- +-- +-- end sqltsch.sql \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec new file mode 100644 index 00000000000..c9380fae280 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec @@ -0,0 +1,15 @@ +// +// Geo-specific Sys Commands +// + +geoSysColumns +SYS COLUMNS TABLE LIKE 'geo'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i|BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i| NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |city |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |location |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |2 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |location_no_dv |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |region |12 |KEYWORD |32766 |2147483647 |null |null |1 
|null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |region_point |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |shape |114 |GEO_SHAPE |2147483647 |2147483647 |null |null |1 |null |null |114 |0 |null |6 |YES |null |null |null |null |NO |NO +; \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index db84a444f57..d5a4cb436e6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -63,6 +63,7 @@ import static org.elasticsearch.xpack.sql.stats.FeatureMetric.LIMIT; import static org.elasticsearch.xpack.sql.stats.FeatureMetric.LOCAL; import static org.elasticsearch.xpack.sql.stats.FeatureMetric.ORDERBY; import static org.elasticsearch.xpack.sql.stats.FeatureMetric.WHERE; +import static org.elasticsearch.xpack.sql.type.DataType.GEO_SHAPE; /** * The verifier has the role of checking the analyzed tree for failures and build a list of failures following this check. @@ -131,7 +132,6 @@ public final class Verifier { // start bottom-up plan.forEachUp(p -> { - if (p.analyzed()) { return; } @@ -236,6 +236,7 @@ public final class Verifier { checkForScoreInsideFunctions(p, localFailures); checkNestedUsedInGroupByOrHaving(p, localFailures); + checkForGeoFunctionsOnDocValues(p, localFailures); // everything checks out // mark the plan as analyzed @@ -719,4 +720,33 @@ public final class Verifier { fail(nested.get(0), "HAVING isn't (yet) compatible with nested fields " + new AttributeSet(nested).names())); } } + + /** + * Makes sure that geo shapes do not appear in filter, aggregation and sorting contexts + */ + private static void checkForGeoFunctionsOnDocValues(LogicalPlan p, Set localFailures) { + + p.forEachDown(f -> { + f.condition().forEachUp(fa -> { + if (fa.field().getDataType() == GEO_SHAPE) { + localFailures.add(fail(fa, "geo shapes cannot be used for filtering")); + } + }, FieldAttribute.class); + }, Filter.class); + + // geo shape fields shouldn't be used in aggregates or having (yet) + p.forEachDown(a -> a.groupings().forEach(agg -> agg.forEachUp(fa -> { + if (fa.field().getDataType() == GEO_SHAPE) { + localFailures.add(fail(fa, "geo shapes cannot be used in grouping")); + } + }, FieldAttribute.class)), Aggregate.class); + + + // geo shape fields shouldn't be used in order by clauses + p.forEachDown(o -> o.order().forEach(agg -> agg.forEachUp(fa -> { + if (fa.field().getDataType() == GEO_SHAPE) { + localFailures.add(fail(fa, "geo shapes cannot be used for sorting")); + } + }, FieldAttribute.class)), OrderBy.class); + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java index 652197473ab..13294fbca22 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java @@ -5,13 +5,17 @@ */ package 
org.elasticsearch.xpack.sql.execution.search.extractor;

+import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.document.DocumentField;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
+import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape;
 import org.elasticsearch.xpack.sql.type.DataType;
 import org.elasticsearch.xpack.sql.util.DateUtils;

@@ -128,13 +132,31 @@ public class FieldHitExtractor implements HitExtractor {
             if (list.isEmpty()) {
                 return null;
             } else {
-                if (arrayLeniency || list.size() == 1) {
-                    return unwrapMultiValue(list.get(0));
-                } else {
-                    throw new SqlIllegalArgumentException("Arrays (returned by [{}]) are not supported", fieldName);
+                // let's make sure first that we are not dealing with a geo_point represented as an array
+                if (isGeoPointArray(list) == false) {
+                    if (list.size() == 1 || arrayLeniency) {
+                        return unwrapMultiValue(list.get(0));
+                    } else {
+                        throw new SqlIllegalArgumentException("Arrays (returned by [{}]) are not supported", fieldName);
+                    }
                 }
             }
         }
+        if (dataType == DataType.GEO_POINT) {
+            try {
+                GeoPoint geoPoint = GeoUtils.parseGeoPoint(values, true);
+                return new GeoShape(geoPoint.lon(), geoPoint.lat());
+            } catch (ElasticsearchParseException ex) {
+                throw new SqlIllegalArgumentException("Cannot parse geo_point value [{}] (returned by [{}])", values, fieldName);
+            }
+        }
+        if (dataType == DataType.GEO_SHAPE) {
+            try {
+                return new GeoShape(values);
+            } catch (IOException ex) {
+                throw new SqlIllegalArgumentException("Cannot read geo_shape value [{}] (returned by [{}])", values, fieldName);
+            }
+        }
         if (values instanceof Map) {
             throw new SqlIllegalArgumentException("Objects (returned by [{}]) are not supported", fieldName);
         }
@@ -149,6 +171,17 @@ public class FieldHitExtractor implements HitExtractor {
         throw new SqlIllegalArgumentException("Type {} (returned by [{}]) is not supported", values.getClass().getSimpleName(), fieldName);
     }
+    private boolean isGeoPointArray(List<?> list) {
+        if (dataType != DataType.GEO_POINT) {
+            return false;
+        }
+        // we expect the point in [lon lat] or [lon lat alt] formats
+        if (list.size() > 3 || list.size() < 1) {
+            return false;
+        }
+        return list.get(0) instanceof Number;
+    }
+
     @SuppressWarnings({ "unchecked", "rawtypes" })
     Object extractFromSource(Map<String, Object> map) {
         Object value = null;
@@ -173,7 +206,9 @@ public class FieldHitExtractor implements HitExtractor {
             if (node instanceof List) {
                 List listOfValues = (List) node;
-                if (listOfValues.size() == 1 || arrayLeniency) {
+                // we can only apply this optimization for elements before the last one in the path, since geo points
+                // are represented as arrays and we don't want to blindly ignore the second element of an array if
+                // arrayLeniency is enabled
+                if ((i < path.length - 1) && (listOfValues.size() == 1 || arrayLeniency)) {
                     // this is a List with a size of 1 e.g.: {"a" : [{"b" : "value"}]} meaning the JSON is a list with one element
                     // or a list of values with one element e.g.: {"a": {"b" : ["value"]}}
                     // in case of being lenient about arrays, just extract the first value in the array
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java index f6e1e3ad8be..d382dad83a1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java @@ -57,6 +57,11 @@ public final class TypeResolutions { "date", "time", "datetime", "numeric"); } + + public static TypeResolution isGeo(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isGeo, operationName, paramOrd, "geo_point", "geo_shape"); + } + public static TypeResolution isExact(Expression e, String message) { if (e instanceof FieldAttribute) { EsField.Exact exact = ((FieldAttribute) e).getExactInfo(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java index 0e9f07ef213..3a9ae062034 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java @@ -46,6 +46,13 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Quarter; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.SecondOfMinute; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.WeekOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StAswkt; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistance; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StGeometryType; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosql; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StX; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StY; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StZ; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ACos; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ASin; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ATan; @@ -249,11 +256,23 @@ public class FunctionRegistry { def(Space.class, Space::new, "SPACE"), def(Substring.class, Substring::new, "SUBSTRING"), def(UCase.class, UCase::new, "UCASE")); + // DataType conversion addToMap(def(Cast.class, Cast::new, "CAST", "CONVERT")); // Scalar "meta" functions addToMap(def(Database.class, Database::new, "DATABASE"), def(User.class, User::new, "USER")); + + // Geo Functions + addToMap(def(StAswkt.class, StAswkt::new, "ST_ASWKT", "ST_ASTEXT"), + def(StDistance.class, StDistance::new, "ST_DISTANCE"), + def(StWkttosql.class, StWkttosql::new, "ST_WKTTOSQL", "ST_GEOMFROMTEXT"), + def(StGeometryType.class, StGeometryType::new, "ST_GEOMETRYTYPE"), + def(StX.class, StX::new, "ST_X"), + def(StY.class, StY::new, "ST_Y"), + def(StZ.class, StZ::new, "ST_Z") + ); + // Special addToMap(def(Score.class, Score::new, "SCORE")); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java index d14aeea507f..0b9bbd1094a 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java @@ -11,6 +11,9 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeP import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistanceProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosqlProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.TimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor; @@ -98,6 +101,10 @@ public final class Processors { entries.add(new Entry(Processor.class, LocateFunctionProcessor.NAME, LocateFunctionProcessor::new)); entries.add(new Entry(Processor.class, ReplaceFunctionProcessor.NAME, ReplaceFunctionProcessor::new)); entries.add(new Entry(Processor.class, SubstringFunctionProcessor.NAME, SubstringFunctionProcessor::new)); + // geo + entries.add(new Entry(Processor.class, GeoProcessor.NAME, GeoProcessor::new)); + entries.add(new Entry(Processor.class, StWkttosqlProcessor.NAME, StWkttosqlProcessor::new)); + entries.add(new Entry(Processor.class, StDistanceProcessor.NAME, StDistanceProcessor::new)); return entries; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java new file mode 100644 index 00000000000..519e4c0c740 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; +import java.util.function.Function; + +public class GeoProcessor implements Processor { + + private interface GeoShapeFunction { + default R apply(Object o) { + if (o instanceof GeoShape) { + return doApply((GeoShape) o); + } else { + throw new SqlIllegalArgumentException("A geo_point or geo_shape is required; received [{}]", o); + } + } + + R doApply(GeoShape s); + } + + public enum GeoOperation { + ASWKT(GeoShape::toString), + GEOMETRY_TYPE(GeoShape::getGeometryType), + X(GeoShape::getX), + Y(GeoShape::getY), + Z(GeoShape::getZ); + + private final Function apply; + + GeoOperation(GeoShapeFunction apply) { + this.apply = l -> l == null ? 
null : apply.apply(l); + } + + public final Object apply(Object l) { + return apply.apply(l); + } + } + + public static final String NAME = "geo"; + + private final GeoOperation processor; + + public GeoProcessor(GeoOperation processor) { + this.processor = processor; + } + + public GeoProcessor(StreamInput in) throws IOException { + processor = in.readEnum(GeoOperation.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(processor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object process(Object input) { + return processor.apply(input); + } + + GeoOperation processor() { + return processor; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + GeoProcessor other = (GeoProcessor) obj; + return processor == other.processor; + } + + @Override + public int hashCode() { + return processor.hashCode(); + } + + @Override + public String toString() { + return processor.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java new file mode 100644 index 00000000000..74b5c9646b8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java @@ -0,0 +1,222 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.geo.GeometryParser; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.GeometryVisitor; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Polygon; +import org.elasticsearch.geo.geometry.Rectangle; +import org.elasticsearch.geo.utils.WellKnownText; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import java.io.IOException; +import java.io.InputStream; +import java.text.ParseException; +import java.util.Objects; + +/** + * Wrapper class to represent a GeoShape in SQL + * + * It is required to override the XContent serialization. 
The ShapeBuilder serializes using GeoJSON by default, + * but in SQL we need the serialization to be WKT-based. + */ +public class GeoShape implements ToXContentFragment, NamedWriteable { + + public static final String NAME = "geo"; + + private final Geometry shape; + + public GeoShape(double lon, double lat) { + shape = new Point(lat, lon); + } + + public GeoShape(Object value) throws IOException { + try { + shape = parse(value); + } catch (ParseException ex) { + throw new SqlIllegalArgumentException("Cannot parse [" + value + "] as a geo_shape value", ex); + } + } + + public GeoShape(StreamInput in) throws IOException { + String value = in.readString(); + try { + shape = parse(value); + } catch (ParseException ex) { + throw new SqlIllegalArgumentException("Cannot parse [" + value + "] as a geo_shape value", ex); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(WellKnownText.toWKT(shape)); + } + + @Override + public String toString() { + return WellKnownText.toWKT(shape); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(WellKnownText.toWKT(shape)); + } + + public Geometry toGeometry() { + return shape; + } + + public Point firstPoint() { + return shape.visit(new GeometryVisitor() { + @Override + public Point visit(Circle circle) { + return new Point(circle.getLat(), circle.getLon(), circle.hasAlt() ? circle.getAlt() : Double.NaN); + } + + @Override + public Point visit(GeometryCollection collection) { + if (collection.size() > 0) { + return collection.get(0).visit(this); + } + return null; + } + + @Override + public Point visit(Line line) { + if (line.length() > 0) { + return new Point(line.getLat(0), line.getLon(0), line.hasAlt() ? line.getAlt(0) : Double.NaN); + } + return null; + } + + @Override + public Point visit(LinearRing ring) { + return visit((Line) ring); + } + + @Override + public Point visit(MultiLine multiLine) { + return visit((GeometryCollection) multiLine); + } + + @Override + public Point visit(MultiPoint multiPoint) { + return visit((GeometryCollection) multiPoint); + } + + @Override + public Point visit(MultiPolygon multiPolygon) { + return visit((GeometryCollection) multiPolygon); + } + + @Override + public Point visit(Point point) { + return point; + } + + @Override + public Point visit(Polygon polygon) { + return visit(polygon.getPolygon()); + } + + @Override + public Point visit(Rectangle rectangle) { + return new Point(rectangle.getMinLat(), rectangle.getMinLon(), rectangle.getMinAlt()); + } + }); + } + + public Double getX() { + Point firstPoint = firstPoint(); + return firstPoint != null ? firstPoint.getLon() : null; + } + + public Double getY() { + Point firstPoint = firstPoint(); + return firstPoint != null ? firstPoint.getLat() : null; + } + + public Double getZ() { + Point firstPoint = firstPoint(); + return firstPoint != null && firstPoint.hasAlt() ? 
firstPoint.getAlt() : null; + } + + public String getGeometryType() { + return toGeometry().type().name(); + } + + public static double distance(GeoShape shape1, GeoShape shape2) { + if (shape1.shape instanceof Point == false) { + throw new SqlIllegalArgumentException("distance calculation is only supported for points; received [{}]", shape1); + } + if (shape2.shape instanceof Point == false) { + throw new SqlIllegalArgumentException("distance calculation is only supported for points; received [{}]", shape2); + } + double srcLat = ((Point) shape1.shape).getLat(); + double srcLon = ((Point) shape1.shape).getLon(); + double dstLat = ((Point) shape2.shape).getLat(); + double dstLon = ((Point) shape2.shape).getLon(); + return GeoUtils.arcDistance(srcLat, srcLon, dstLat, dstLon); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GeoShape geoShape = (GeoShape) o; + return shape.equals(geoShape.shape); + } + + @Override + public int hashCode() { + return Objects.hash(shape); + } + + @Override + public String getWriteableName() { + return NAME; + } + + private static Geometry parse(Object value) throws IOException, ParseException { + XContentBuilder content = JsonXContent.contentBuilder(); + content.startObject(); + content.field("value", value); + content.endObject(); + + try (InputStream stream = BytesReference.bytes(content).streamInput(); + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + parser.nextToken(); // start object + parser.nextToken(); // field name + parser.nextToken(); // field value + return GeometryParser.parse(parser, true, true, true); + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java new file mode 100644 index 00000000000..5c4b6edbe87 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_AsWKT function that takes a geometry and returns its Well Known Text representation + */ +public class StAswkt extends UnaryGeoFunction { + + public StAswkt(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StAswkt::new, field()); + } + + @Override + protected StAswkt replaceChild(Expression newChild) { + return new StAswkt(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.ASWKT; + } + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java new file mode 100644 index 00000000000..fd14e90dd9d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.expression.predicate.BinaryOperator; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isGeo; +import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; + +/** + * Calculates the distance between two points + */ +public class StDistance extends BinaryOperator { + + private static final StDistanceFunction FUNCTION = new StDistanceFunction(); + + public StDistance(Source source, Expression source1, Expression source2) { + super(source, source1, source2, FUNCTION); + } + + @Override + protected StDistance replaceChildren(Expression newLeft, Expression newRight) { + return new StDistance(source(), newLeft, newRight); + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StDistance::new, left(), right()); + } + + @Override + public ScriptTemplate scriptWithField(FieldAttribute field) { + return new ScriptTemplate(processScript("{sql}.geoDocValue(doc,{})"), + paramsBuilder().variable(field.exactAttribute().name()).build(), + dataType()); + } + + @Override + protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { + return isGeo(e, sourceText(), paramOrdinal); + } + + @Override + public 
StDistance swapLeftAndRight() { + return new StDistance(source(), right(), left()); + } + + @Override + protected Pipe makePipe() { + return new StDistancePipe(source(), this, Expressions.pipe(left()), Expressions.pipe(right())); + } + + @Override + protected String scriptMethodName() { + return "stDistance"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java new file mode 100644 index 00000000000..d1c15c1e2a1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.predicate.PredicateBiFunction; + +class StDistanceFunction implements PredicateBiFunction { + + @Override + public String name() { + return "ST_DISTANCE"; + } + + @Override + public String symbol() { + return "ST_DISTANCE"; + } + + @Override + public Double doApply(Object s1, Object s2) { + return StDistanceProcessor.process(s1, s2); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java new file mode 100644 index 00000000000..c9442664826 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
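Two details worth calling out from the code above: only points are accepted, and the result is meters along the arc. A sketch (illustrative, not part of the patch), with values borrowed from the processor tests at the end of this patch:

[source,java]
--------------------------------------------------
// Distance in meters between the points (lon 10, lat 20) and (lon 30, lat 40).
// Note the swap: GeoShape takes (lon, lat), GeoUtils.arcDistance takes (lat, lon).
double meters = GeoShape.distance(new GeoShape(10, 20), new GeoShape(30, 40));
// equal to GeoUtils.arcDistance(20, 10, 40, 30), as the tests below assert
--------------------------------------------------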
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.BinaryPipe; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; + +import java.util.Objects; + +public class StDistancePipe extends BinaryPipe { + + public StDistancePipe(Source source, Expression expression, Pipe left, Pipe right) { + super(source, expression, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StDistancePipe::new, expression(), left(), right()); + } + + @Override + protected BinaryPipe replaceChildren(Pipe left, Pipe right) { + return new StDistancePipe(source(), expression(), left, right); + } + + @Override + public StDistanceProcessor asProcessor() { + return new StDistanceProcessor(left().asProcessor(), right().asProcessor()); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + StDistancePipe other = (StDistancePipe) obj; + return Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java new file mode 100644 index 00000000000..d6c9026b982 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
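At execution time the expression folds through the pipe into the processor. A sketch of the round trip (illustrative, not part of the patch; l(...) is the literal helper used by StDistanceProcessorTests below, which can call the protected makePipe() because it lives in the same package):

[source,java]
--------------------------------------------------
// ST_DISTANCE over two literal points evaluates locally; process(null) is
// enough because no document input is involved.
Object meters = new StDistance(Source.EMPTY,
        l(new GeoShape(1, 2)),
        l(new GeoShape(3, 4)))
        .makePipe().asProcessor().process(null);
--------------------------------------------------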
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.gen.processor.BinaryProcessor; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; +import java.util.Objects; + +public class StDistanceProcessor extends BinaryProcessor { + + public static final String NAME = "geo_distance"; + + public StDistanceProcessor(Processor source1, Processor source2) { + super(source1, source2); + } + + public StDistanceProcessor(StreamInput in) throws IOException { + super(in); + } + + @Override + protected void doWrite(StreamOutput out) throws IOException { + + } + + @Override + public Object process(Object input) { + Object l = left().process(input); + checkParameter(l); + Object r = right().process(input); + checkParameter(r); + return doProcess(l, r); + } + + @Override + protected Object doProcess(Object left, Object right) { + return process(left, right); + } + + public static Double process(Object source1, Object source2) { + if (source1 == null || source2 == null) { + return null; + } + + if (source1 instanceof GeoShape == false) { + throw new SqlIllegalArgumentException("A geo_point or geo_shape with type point is required; received [{}]", source1); + } + if (source2 instanceof GeoShape == false) { + throw new SqlIllegalArgumentException("A geo_point or geo_shape with type point is required; received [{}]", source2); + } + return GeoShape.distance((GeoShape) source1, (GeoShape) source2); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + StDistanceProcessor other = (StDistanceProcessor) obj; + return Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java new file mode 100644 index 00000000000..15215bd9201 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
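The static process method above is also the runtime type gate. A sketch of its edge cases (illustrative, not part of the patch; the error message is the one asserted by the tests below):

[source,java]
--------------------------------------------------
// Nulls propagate instead of failing:
assert StDistanceProcessor.process(null, new GeoShape(1, 2)) == null;
// Anything that is not a point-like GeoShape fails fast, e.g.
// StDistanceProcessor.process("foo", new GeoShape(1, 2)) throws
// SqlIllegalArgumentException("A geo_point or geo_shape with type point is required; received [foo]")
--------------------------------------------------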
+ */ + + package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + + import org.elasticsearch.xpack.sql.expression.Expression; + import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; + import org.elasticsearch.xpack.sql.tree.NodeInfo; + import org.elasticsearch.xpack.sql.tree.Source; + import org.elasticsearch.xpack.sql.type.DataType; + + /** + * ST_GEOMETRY_TYPE function that takes a geometry and returns its type + */ + public class StGeometryType extends UnaryGeoFunction { + + public StGeometryType(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo<StGeometryType> info() { + return NodeInfo.create(this, StGeometryType::new, field()); + } + + @Override + protected StGeometryType replaceChild(Expression newChild) { + return new StGeometryType(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.GEOMETRY_TYPE; + } + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java new file mode 100644 index 00000000000..3ebae55dec4 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.gen.script.Scripts; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isString; + +/** + * Constructs geometric objects from their WKT representations + */ +public class StWkttosql extends UnaryScalarFunction { + + public StWkttosql(Source source, Expression field) { + super(source, field); + } + + @Override + protected StWkttosql replaceChild(Expression newChild) { + return new StWkttosql(source(), newChild); + } + + @Override + protected TypeResolution resolveType() { + if (field().dataType().isString()) { + return TypeResolution.TYPE_RESOLVED; + } + return isString(field(), functionName(), Expressions.ParamOrdinal.DEFAULT); + } + + @Override + protected Processor makeProcessor() { + return StWkttosqlProcessor.INSTANCE; + } + + @Override + public DataType dataType() { + return DataType.GEO_SHAPE; + } + + @Override + protected NodeInfo<StWkttosql> info() { + return NodeInfo.create(this, StWkttosql::new, field()); + } + + @Override + public String processScript(String script) { + return Scripts.formatTemplate(Scripts.SQL_SCRIPTS + ".stWktToSql(" + script + ")"); + } + + @Override + public Object fold() { + return StWkttosqlProcessor.INSTANCE.process(field().fold()); + } + +}
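Since this function only wires up StWkttosqlProcessor (next file), its behavior is easiest to show through the processor's static entry point. A sketch (illustrative, not part of the patch):

[source,java]
--------------------------------------------------
// Valid WKT yields a GeoShape; invalid input is rejected with
// SqlIllegalArgumentException("Cannot parse [...] as a geo_shape value")
// rather than silently returning null.
GeoShape shape = StWkttosqlProcessor.apply("POINT (10 20)");
--------------------------------------------------
diff --git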
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java new file mode 100644 index 00000000000..f17ee2315be --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; + +public class StWkttosqlProcessor implements Processor { + + static final StWkttosqlProcessor INSTANCE = new StWkttosqlProcessor(); + + public static final String NAME = "geo_wkttosql"; + + StWkttosqlProcessor() { + } + + public StWkttosqlProcessor(StreamInput in) throws IOException { + } + + @Override + public Object process(Object input) { + return StWkttosqlProcessor.apply(input); + } + + public static GeoShape apply(Object input) { + if (input == null) { + return null; + } + + if ((input instanceof String) == false) { + throw new SqlIllegalArgumentException("A string is required; received [{}]", input); + } + try { + return new GeoShape(input); + } catch (IOException | IllegalArgumentException | ElasticsearchParseException ex) { + throw new SqlIllegalArgumentException("Cannot parse [{}] as a geo_shape value", input); + } + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java new file mode 100644 index 00000000000..f3cdafbe70d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
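The upcoming coordinate functions (ST_X, ST_Y and ST_Z) all reduce to GeoOperation applications on the shape's first point. A sketch of the expected values (illustrative, not part of the patch; mirrors the GeoProcessor tests below):

[source,java]
--------------------------------------------------
// The coordinate accessors are thin wrappers around GeoOperation:
assert (Double) GeoProcessor.GeoOperation.X.apply(new GeoShape(10, 20)) == 10.0;
assert (Double) GeoProcessor.GeoOperation.Y.apply(new GeoShape(10, 20)) == 20.0;
assert GeoProcessor.GeoOperation.Z.apply(new GeoShape(10, 20)) == null;   // no altitude
--------------------------------------------------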
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_X function that takes a geometry and returns the X coordinate of its first point + */ +public class StX extends UnaryGeoFunction { + + public StX(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StX::new, field()); + } + + @Override + protected StX replaceChild(Expression newChild) { + return new StX(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.X; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java new file mode 100644 index 00000000000..0a9bc3aa1a4 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_Y function that takes a geometry and returns the Y coordinate of its first point + */ +public class StY extends UnaryGeoFunction { + + public StY(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StY::new, field()); + } + + @Override + protected StY replaceChild(Expression newChild) { + return new StY(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.Y; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java new file mode 100644 index 00000000000..b6c0c9466bb --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
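For geometries with more than one coordinate, the answer comes from the first point, e.g. the first vertex of a linestring. A sketch (illustrative, not part of the patch; values taken from the GeoProcessor tests below; the WKT constructor declares IOException):

[source,java]
--------------------------------------------------
// The first vertex of the linestring answers for the whole geometry:
GeoShape line = new GeoShape("LINESTRING (3.0 1.0, 4.0 2.0)");
assert (Double) GeoProcessor.GeoOperation.X.apply(line) == 3.0;
assert (Double) GeoProcessor.GeoOperation.Y.apply(line) == 1.0;
--------------------------------------------------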
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_Z function that takes a geometry and returns the Z coordinate of its first point + */ +public class StZ extends UnaryGeoFunction { + + public StZ(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StZ::new, field()); + } + + @Override + protected StZ replaceChild(Expression newChild) { + return new StZ(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.Z; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java new file mode 100644 index 00000000000..50c05b7fbed --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.Locale; +import java.util.Objects; + +import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isGeo; +import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; + +/** + * Base class for functions that get a single geo shape or geo point as an argument + */ +public abstract class UnaryGeoFunction extends UnaryScalarFunction { + + protected UnaryGeoFunction(Source source, Expression field) { + super(source, field); + } + + @Override + public Object fold() { + return operation().apply(field().fold()); + } + + @Override + protected TypeResolution resolveType() { + if (!childrenResolved()) { + return new TypeResolution("Unresolved children"); + } + return isGeo(field(), operation().toString(), Expressions.ParamOrdinal.DEFAULT); + } + + @Override + protected Processor makeProcessor() { + return new GeoProcessor(operation()); + } + + protected abstract GeoProcessor.GeoOperation operation(); + + @Override + public ScriptTemplate scriptWithField(FieldAttribute field) { + //TODO change this to use _source instead of the exact form (aka field.keyword for geo shape fields) + return new ScriptTemplate(processScript("{sql}.geoDocValue(doc,{})"), + 
paramsBuilder().variable(field.exactAttribute().name()).build(), + dataType()); + } + + @Override + public String processScript(String template) { + // basically, transform the script to InternalSqlScriptUtils.[function_name](other_function_or_field_name) + return super.processScript( + format(Locale.ROOT, "{sql}.%s(%s)", + StringUtils.underscoreToLowerCamelCase("ST_" + operation().name()), + template)); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + UnaryGeoFunction other = (UnaryGeoFunction) obj; + return Objects.equals(other.field(), field()); + } + + @Override + public int hashCode() { + return Objects.hash(field()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java index 6a4ec411fe1..d39aec44236 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.expression.function.scalar.whitelist; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.script.JodaCompatibleZonedDateTime; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; @@ -12,6 +13,10 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeF import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor.NonIsoDateTimeExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistanceProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosqlProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.TimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor.BinaryOptionalMathOperation; @@ -73,7 +78,7 @@ public final class InternalSqlScriptUtils { } return null; } - + public static boolean nullSafeFilter(Boolean filter) { return filter == null ? 
false : filter.booleanValue(); } @@ -109,7 +114,7 @@ public final class InternalSqlScriptUtils { public static Boolean lt(Object left, Object right) { return BinaryComparisonOperation.LT.apply(left, right); } - + public static Boolean lte(Object left, Object right) { return BinaryComparisonOperation.LTE.apply(left, right); } @@ -125,7 +130,7 @@ public final class InternalSqlScriptUtils { public static Boolean and(Boolean left, Boolean right) { return BinaryLogicOperation.AND.apply(left, right); } - + public static Boolean or(Boolean left, Boolean right) { return BinaryLogicOperation.OR.apply(left, right); } @@ -328,14 +333,14 @@ public final class InternalSqlScriptUtils { } return DateTimeFunction.dateTimeChrono(asDateTime(dateTime), tzId, chronoName); } - + public static String dayName(Object dateTime, String tzId) { if (dateTime == null || tzId == null) { return null; } return NameExtractor.DAY_NAME.extract(asDateTime(dateTime), tzId); } - + public static Integer dayOfWeek(Object dateTime, String tzId) { if (dateTime == null || tzId == null) { return null; @@ -349,7 +354,7 @@ public final class InternalSqlScriptUtils { } return NameExtractor.MONTH_NAME.extract(asDateTime(dateTime), tzId); } - + public static Integer quarter(Object dateTime, String tzId) { if (dateTime == null || tzId == null) { return null; @@ -390,7 +395,7 @@ public final class InternalSqlScriptUtils { } return dateTime; } - + public static IntervalDayTime intervalDayTime(String text, String typeName) { if (text == null || typeName == null) { return null; @@ -416,7 +421,7 @@ public final class InternalSqlScriptUtils { public static Integer ascii(String s) { return (Integer) StringOperation.ASCII.apply(s); } - + public static Integer bitLength(String s) { return (Integer) StringOperation.BIT_LENGTH.apply(s); } @@ -428,7 +433,7 @@ public final class InternalSqlScriptUtils { public static Integer charLength(String s) { return (Integer) StringOperation.CHAR_LENGTH.apply(s); } - + public static String concat(String s1, String s2) { return (String) ConcatFunctionProcessor.process(s1, s2); } @@ -452,7 +457,7 @@ public final class InternalSqlScriptUtils { public static Integer locate(String s1, String s2) { return locate(s1, s2, null); } - + public static Integer locate(String s1, String s2, Number pos) { return LocateFunctionProcessor.doProcess(s1, s2, pos); } @@ -460,7 +465,7 @@ public final class InternalSqlScriptUtils { public static String ltrim(String s) { return (String) StringOperation.LTRIM.apply(s); } - + public static Integer octetLength(String s) { return (Integer) StringOperation.OCTET_LENGTH.apply(s); } @@ -468,15 +473,15 @@ public final class InternalSqlScriptUtils { public static Integer position(String s1, String s2) { return (Integer) BinaryStringStringOperation.POSITION.apply(s1, s2); } - + public static String repeat(String s, Number count) { return BinaryStringNumericOperation.REPEAT.apply(s, count); } - + public static String replace(String s1, String s2, String s3) { return (String) ReplaceFunctionProcessor.doProcess(s1, s2, s3); } - + public static String right(String s, Number count) { return BinaryStringNumericOperation.RIGHT.apply(s, count); } @@ -496,7 +501,47 @@ public final class InternalSqlScriptUtils { public static String ucase(String s) { return (String) StringOperation.UCASE.apply(s); } - + + public static String stAswkt(Object v) { + return GeoProcessor.GeoOperation.ASWKT.apply(v).toString(); + } + + public static GeoShape stWktToSql(String wktString) { + return 
StWkttosqlProcessor.apply(wktString); + } + + public static Double stDistance(Object v1, Object v2) { + return StDistanceProcessor.process(v1, v2); + } + + public static String stGeometryType(Object g) { + return (String) GeoProcessor.GeoOperation.GEOMETRY_TYPE.apply(g); + } + + public static Double stX(Object g) { + return (Double) GeoProcessor.GeoOperation.X.apply(g); + } + + public static Double stY(Object g) { + return (Double) GeoProcessor.GeoOperation.Y.apply(g); + } + + public static Double stZ(Object g) { + return (Double) GeoProcessor.GeoOperation.Z.apply(g); + } + + // processes doc value as a geometry + public static GeoShape geoDocValue(Map<String, ScriptDocValues<?>> doc, String fieldName) { + Object obj = docValue(doc, fieldName); + if (obj != null) { + if (obj instanceof GeoPoint) { + return new GeoShape(((GeoPoint) obj).getLon(), ((GeoPoint) obj).getLat()); + } + // TODO: Add support for geo_shapes when it is there + } + return null; + } + // // Casting // diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java index b24ec56727d..223e22b2a33 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.expression.literal.IntervalDayTime; import org.elasticsearch.xpack.sql.expression.literal.IntervalYearMonth; import org.elasticsearch.xpack.sql.type.DataType; @@ -95,6 +96,13 @@ public interface ScriptWeaver { dataType()); } + if (fold instanceof GeoShape) { + GeoShape geoShape = (GeoShape) fold; + return new ScriptTemplate(processScript("{sql}.stWktToSql({})"), + paramsBuilder().variable(geoShape.toString()).build(), + dataType()); + } + return new ScriptTemplate(processScript("{}"), paramsBuilder().variable(fold).build(), dataType()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java index b06a1fb8874..ed7dc9da775 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java @@ -408,5 +408,4 @@ public final class Intervals { public static TemporalAmount parseInterval(Source source, String value, DataType intervalType) { return PARSERS.get(intervalType).parse(source, value); } - } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java index 333ba3f11c0..d6bdeeb0fe4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java @@ -7,6 +7,7 @@ package
org.elasticsearch.xpack.sql.expression.literal; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import java.util.ArrayList; import java.util.Collection; @@ -30,6 +31,7 @@ public final class Literals { entries.add(new NamedWriteableRegistry.Entry(IntervalDayTime.class, IntervalDayTime.NAME, IntervalDayTime::new)); entries.add(new NamedWriteableRegistry.Entry(IntervalYearMonth.class, IntervalYearMonth.NAME, IntervalYearMonth::new)); + entries.add(new NamedWriteableRegistry.Entry(GeoShape.class, GeoShape.NAME, GeoShape::new)); return entries; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index 8495b0269eb..7e5516810d9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.sql.planner; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Attribute; @@ -38,6 +40,8 @@ import org.elasticsearch.xpack.sql.expression.function.grouping.Histogram; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeHistogramFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistance; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.expression.literal.Intervals; import org.elasticsearch.xpack.sql.expression.predicate.Range; @@ -85,6 +89,7 @@ import org.elasticsearch.xpack.sql.querydsl.agg.SumAgg; import org.elasticsearch.xpack.sql.querydsl.agg.TopHitsAgg; import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; import org.elasticsearch.xpack.sql.querydsl.query.ExistsQuery; +import org.elasticsearch.xpack.sql.querydsl.query.GeoDistanceQuery; import org.elasticsearch.xpack.sql.querydsl.query.MatchQuery; import org.elasticsearch.xpack.sql.querydsl.query.MultiMatchQuery; import org.elasticsearch.xpack.sql.querydsl.query.NestedQuery; @@ -656,6 +661,24 @@ final class QueryTranslator { Object value = valueOf(bc.right()); String format = dateFormat(bc.left()); + // Possible geo optimization + if (bc.left() instanceof StDistance && value instanceof Number) { + if (bc instanceof LessThan || bc instanceof LessThanOrEqual) { + // Special case for ST_Distance translatable into geo_distance query + StDistance stDistance = (StDistance) bc.left(); + if (stDistance.left() instanceof FieldAttribute && stDistance.right().foldable()) { + Object geoShape = valueOf(stDistance.right()); + if (geoShape instanceof GeoShape) { + Geometry geometry = ((GeoShape) geoShape).toGeometry(); + if (geometry instanceof Point) { + String field = nameOf(stDistance.left()); + return new GeoDistanceQuery(source, field, ((Number) value).doubleValue(), + ((Point) geometry).getLat(), ((Point) geometry).getLon()); + } + } + } + } + } if (bc instanceof GreaterThan) { return new 
RangeQuery(source, name, value, false, null, false, format); } @@ -954,6 +977,9 @@ final class QueryTranslator { protected static Query handleQuery(ScalarFunction sf, Expression field, Supplier query) { Query q = query.get(); + if (field instanceof StDistance && q instanceof GeoDistanceQuery) { + return wrapIfNested(q, ((StDistance) field).left()); + } if (field instanceof FieldAttribute) { return wrapIfNested(q, field); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java new file mode 100644 index 00000000000..dd1a1171c16 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.sql.tree.Source; + +import java.util.Objects; + +public class GeoDistanceQuery extends LeafQuery { + + private final String field; + private final double lat; + private final double lon; + private final double distance; + + public GeoDistanceQuery(Source source, String field, double distance, double lat, double lon) { + super(source); + this.field = field; + this.distance = distance; + this.lat = lat; + this.lon = lon; + } + + public String field() { + return field; + } + + public double lat() { + return lat; + } + + public double lon() { + return lon; + } + + public double distance() { + return distance; + } + + @Override + public QueryBuilder asBuilder() { + return QueryBuilders.geoDistanceQuery(field).distance(distance, DistanceUnit.METERS).point(lat, lon); + } + + @Override + public int hashCode() { + return Objects.hash(field, distance, lat, lon); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + GeoDistanceQuery other = (GeoDistanceQuery) obj; + return Objects.equals(field, other.field) && + Objects.equals(distance, other.distance) && + Objects.equals(lat, other.lat) && + Objects.equals(lon, other.lon); + } + + @Override + protected String innerToString() { + return field + ":" + "(" + distance + "," + "(" + lat + ", " + lon + "))"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java index 1f04e7c8e19..76f2436e862 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -53,6 +53,9 @@ public enum DataType { // // specialized types // + GEO_SHAPE( ExtTypes.GEOMETRY, Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE, false, false, false), + // display size = 2 doubles + len("POINT( )") + GEO_POINT( ExtTypes.GEOMETRY, Double.BYTES*2, Integer.MAX_VALUE, 25 * 2 + 8, false, false, false), // IP can be v4 or v6. 
The latter has 2^128 addresses or 340,282,366,920,938,463,463,374,607,431,768,211,456 // aka 39 chars IP( "ip", JDBCType.VARCHAR, 39, 39, 0, false, false, true), @@ -251,6 +254,10 @@ public enum DataType { return this != OBJECT && this != NESTED && this != UNSUPPORTED; } + public boolean isGeo() { + return this == GEO_POINT || this == GEO_SHAPE; + } + public boolean isDateBased() { return this == DATE || this == DATETIME; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java index dcd6a1b35a1..3f985ae4e3b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.type; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.expression.literal.Interval; import java.time.OffsetTime; @@ -81,6 +82,9 @@ public final class DataTypes { if (value instanceof Interval) { return ((Interval) value).dataType(); } + if (value instanceof GeoShape) { + return DataType.GEO_SHAPE; + } throw new SqlIllegalArgumentException("No idea what's the DataType for {}", value.getClass()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java index 1ad9dd92abf..2c07be3eb62 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java @@ -27,7 +27,8 @@ enum ExtTypes implements SQLType { INTERVAL_DAY_TO_SECOND(110), INTERVAL_HOUR_TO_MINUTE(111), INTERVAL_HOUR_TO_SECOND(112), - INTERVAL_MINUTE_TO_SECOND(113); + INTERVAL_MINUTE_TO_SECOND(113), + GEOMETRY(114); private final Integer type; diff --git a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt index 4ac4632572c..6d24ea79f2b 100644 --- a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt +++ b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt @@ -4,7 +4,14 @@ # you may not use this file except in compliance with the Elastic License. 
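Before the whitelist itself, one consequence of the QueryTranslator change above deserves a concrete example: a bounded ST_DISTANCE against a constant point is rewritten into a geo_distance query instead of a script. A hedged sketch of the equivalent builder call (field name and radius are illustrative, not from the patch):

[source,java]
--------------------------------------------------
// WHERE ST_Distance(location, ST_WKTToSQL('POINT (10 20)')) < 25
// becomes, via GeoDistanceQuery.asBuilder(), the equivalent of:
QueryBuilder qb = QueryBuilders.geoDistanceQuery("location")    // hypothetical field
        .distance(25, DistanceUnit.METERS)                      // the comparison bound
        .point(20, 10);                                         // lat, lon of the constant
--------------------------------------------------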
# -# This file contains a whitelist for SQL specific utilities available inside SQL scripting +# This file contains a whitelist for SQL specific utilities and classes available inside SQL scripting + +#### Classes + +class org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape { + +} + class org.elasticsearch.xpack.sql.expression.literal.IntervalDayTime { } @@ -137,7 +144,19 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS String space(Number) String substring(String, Number, Number) String ucase(String) - + +# +# Geo Functions +# + GeoShape geoDocValue(java.util.Map, String) + String stAswkt(Object) + Double stDistance(Object, Object) + String stGeometryType(Object) + GeoShape stWktToSql(String) + Double stX(Object) + Double stY(Object) + Double stZ(Object) + # # Casting # diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java index bc7b85b5392..b36111ffac3 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java @@ -158,7 +158,7 @@ public class FieldAttributeTests extends ESTestCase { public void testStarExpansionExcludesObjectAndUnsupportedTypes() { LogicalPlan plan = plan("SELECT * FROM test"); List list = ((Project) plan).projections(); - assertThat(list, hasSize(8)); + assertThat(list, hasSize(10)); List names = Expressions.names(list); assertThat(names, not(hasItem("some"))); assertThat(names, not(hasItem("some.dotted"))); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index dcf8dad5ecb..609e6a52c3e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -773,4 +773,28 @@ public class VerifierErrorMessagesTests extends ESTestCase { public void testProjectUnresolvedAliasInFilter() { assertEquals("1:8: Unknown column [tni]", error("SELECT tni AS i FROM test WHERE i > 10 GROUP BY i")); } + + public void testGeoShapeInWhereClause() { + assertEquals("1:49: geo shapes cannot be used for filtering", + error("SELECT ST_AsWKT(shape) FROM test WHERE ST_AsWKT(shape) = 'point (10 20)'")); + + // We get only one message back because the messages are grouped by the node that caused the issue + assertEquals("1:46: geo shapes cannot be used for filtering", + error("SELECT MAX(ST_X(shape)) FROM test WHERE ST_Y(shape) > 10 GROUP BY ST_GEOMETRYTYPE(shape) ORDER BY ST_ASWKT(shape)")); + } + + public void testGeoShapeInGroupBy() { + assertEquals("1:44: geo shapes cannot be used in grouping", + error("SELECT ST_X(shape) FROM test GROUP BY ST_X(shape)")); + } + + public void testGeoShapeInOrderBy() { + assertEquals("1:44: geo shapes cannot be used for sorting", + error("SELECT ST_X(shape) FROM test ORDER BY ST_Z(shape)")); + } + + public void testGeoShapeInSelect() { + accept("SELECT ST_X(shape) FROM test"); + } + } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 973d5b50fad..50a3b185dba 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.DateUtils; @@ -451,6 +452,125 @@ public class FieldHitExtractorTests extends AbstractWireSerializingTestCase<FieldHitExtractor> { + public void testGeoShapeExtraction() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_SHAPE, UTC, false); + Map<String, Object> map = new HashMap<>(); + map.put(fieldName, "POINT (1 2)"); + assertEquals(new GeoShape(1, 2), fe.extractFromSource(map)); + + map = new HashMap<>(); + assertNull(fe.extractFromSource(map)); + } + + + public void testMultipleGeoShapeExtraction() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_SHAPE, UTC, false); + Map<String, Object> map = new HashMap<>(); + map.put(fieldName, "POINT (1 2)"); + assertEquals(new GeoShape(1, 2), fe.extractFromSource(map)); + + map = new HashMap<>(); + assertNull(fe.extractFromSource(map)); + + Map<String, Object> map2 = new HashMap<>(); + map2.put(fieldName, Arrays.asList("POINT (1 2)", "POINT (3 4)")); + SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map2)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + + FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, DataType.GEO_SHAPE, UTC, false, true); + assertEquals(new GeoShape(1, 2), lenientFe.extractFromSource(map2)); + } + + public void testGeoPointExtractionFromSource() throws IOException { + int layers = randomIntBetween(1, 3); + String pathCombined = ""; + double lat = randomDoubleBetween(-90, 90, true); + double lon = randomDoubleBetween(-180, 180, true); + SearchHit hit = new SearchHit(1); + XContentBuilder source = JsonXContent.contentBuilder(); + boolean[] arrayWrap = new boolean[layers - 1]; + source.startObject(); { + for (int i = 0; i < layers - 1; i++) { + arrayWrap[i] = randomBoolean(); + String name = randomAlphaOfLength(10); + source.field(name); + if (arrayWrap[i]) { + source.startArray(); + } + source.startObject(); + pathCombined = pathCombined + name + "."; + } + String name = randomAlphaOfLength(10); + pathCombined = pathCombined + name; + source.field(name, randomPoint(lat, lon)); + for (int i = layers - 2; i >= 0; i--) { + source.endObject(); + if (arrayWrap[i]) { + source.endArray(); + } + } + } + source.endObject(); + BytesReference sourceRef = BytesReference.bytes(source); + hit.sourceRef(sourceRef); + + FieldHitExtractor fe = new FieldHitExtractor(pathCombined, DataType.GEO_POINT, UTC, false); + assertEquals(new GeoShape(lon, lat), fe.extract(hit)); + } + + public void testMultipleGeoPointExtractionFromSource() throws IOException { + double lat = randomDoubleBetween(-90, 90, true); + double lon = randomDoubleBetween(-180, 180, true); + SearchHit hit = new SearchHit(1); + String fieldName = randomAlphaOfLength(5); + int arraySize = randomIntBetween(2, 4); + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); { + source.startArray(fieldName); +
source.value(randomPoint(lat, lon)); + for (int i = 1; i < arraySize; i++) { + source.value(randomPoint(lat, lon)); + } + source.endArray(); + } + source.endObject(); + BytesReference sourceRef = BytesReference.bytes(source); + hit.sourceRef(sourceRef); + + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, false); + SqlException ex = expectThrows(SqlException.class, () -> fe.extract(hit)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + + FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, false, true); + assertEquals(new GeoShape(lon, lat), lenientFe.extract(hit)); + } + + public void testGeoPointExtractionFromDocValues() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, true); + SearchHit hit = new SearchHit(1); + DocumentField field = new DocumentField(fieldName, singletonList("2, 1")); + hit.fields(singletonMap(fieldName, field)); + assertEquals(new GeoShape(1, 2), fe.extract(hit)); + hit = new SearchHit(1); + assertNull(fe.extract(hit)); + } + + public void testGeoPointExtractionFromMultipleDocValues() { + String fieldName = randomAlphaOfLength(5); + SearchHit hit = new SearchHit(1); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, true); + + hit.fields(singletonMap(fieldName, new DocumentField(fieldName, Arrays.asList("2,1", "3,4")))); + SqlException ex = expectThrows(SqlException.class, () -> fe.extract(hit)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + + FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, true, true); + assertEquals(new GeoShape(1, 2), lenientFe.extract(hit)); + } + private FieldHitExtractor getFieldHitExtractor(String fieldName, boolean useDocValue) { return new FieldHitExtractor(fieldName, null, UTC, useDocValue); } @@ -471,4 +591,18 @@ public class FieldHitExtractorTests extends AbstractWireSerializingTestCase<FieldHitExtractor> { + + private Object randomPoint(double lat, double lon) { + Supplier<Object> value = randomFrom(Arrays.asList( + () -> lat + "," + lon, + () -> Arrays.asList(lon, lat), + () -> { + Map<String, Object> map1 = new HashMap<>(); + map1.put("lat", lat); + map1.put("lon", lon); + return map1; + } + )); + return value.get(); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java new file mode 100644 index 00000000000..07cc6171cf0 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
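One convention the doc-values tests above encode: doc values arrive as "lat, lon" strings while the GeoShape constructor takes (lon, lat). A sketch (illustrative, not part of the patch):

[source,java]
--------------------------------------------------
// The doc value "2, 1" means lat = 2, lon = 1, so the extracted shape
// compares equal to a GeoShape built with the arguments swapped:
GeoShape expected = new GeoShape(1, 2);   // lon = 1, lat = 2
--------------------------------------------------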
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; + +import java.io.IOException; + +public class GeoProcessorTests extends AbstractWireSerializingTestCase { + public static GeoProcessor randomGeoProcessor() { + return new GeoProcessor(randomFrom(GeoOperation.values())); + } + + @Override + protected GeoProcessor createTestInstance() { + return randomGeoProcessor(); + } + + @Override + protected Reader instanceReader() { + return GeoProcessor::new; + } + + @Override + protected GeoProcessor mutateInstance(GeoProcessor instance) throws IOException { + return new GeoProcessor(randomValueOtherThan(instance.processor(), () -> randomFrom(GeoOperation.values()))); + } + + public void testApplyAsWKT() throws Exception { + assertEquals("point (10.0 20.0)", new GeoProcessor(GeoOperation.ASWKT).process(new GeoShape(10, 20))); + assertEquals("point (10.0 20.0)", new GeoProcessor(GeoOperation.ASWKT).process(new GeoShape("POINT (10 20)"))); + } + + public void testApplyGeometryType() throws Exception { + assertEquals("POINT", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape(10, 20))); + assertEquals("POINT", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("POINT (10 20)"))); + assertEquals("MULTIPOINT", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("multipoint (2.0 1.0)"))); + assertEquals("LINESTRING", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("LINESTRING (3.0 1.0, 4.0 2.0)"))); + assertEquals("POLYGON", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))"))); + assertEquals("MULTILINESTRING", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("multilinestring ((3.0 1.0, 4.0 2.0), (2.0 1.0, 5.0 6.0))"))); + assertEquals("MULTIPOLYGON", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("multipolygon (((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0)))"))); + assertEquals("ENVELOPE", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("bbox (10.0, 20.0, 40.0, 30.0)"))); + assertEquals("GEOMETRYCOLLECTION", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("geometrycollection (point (20.0 10.0),point (1.0 2.0))"))); + } + + + public void testApplyGetXYZ() throws Exception { + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape(10, 20))); + assertEquals(20.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape(10, 20))); + assertNull(new GeoProcessor(GeoOperation.Z).process(new GeoShape(10, 20))); + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("POINT (10 20)"))); + assertEquals(20.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("POINT (10 20)"))); + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("POINT (10 20 30)"))); + assertEquals(20.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("POINT (10 20 30)"))); + assertEquals(30.0, new GeoProcessor(GeoOperation.Z).process(new GeoShape("POINT (10 20 30)"))); + assertEquals(2.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("multipoint (2.0 1.0)"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("multipoint (2.0 1.0)"))); + 
+        assertEquals(3.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("LINESTRING (3.0 1.0, 4.0 2.0)")));
+        assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("LINESTRING (3.0 1.0, 4.0 2.0)")));
+        assertEquals(3.0, new GeoProcessor(GeoOperation.X).process(
+            new GeoShape("multilinestring ((3.0 1.0, 4.0 2.0), (2.0 1.0, 5.0 6.0))")));
+        assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(
+            new GeoShape("multilinestring ((3.0 1.0, 4.0 2.0), (2.0 1.0, 5.0 6.0))")));
+        // bbox arguments are minX, maxX, maxY, minY; X evaluates to minX
+        assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("bbox (10.0, 20.0, 40.0, 30.0)")));
+        // bbox arguments are minX, maxX, maxY, minY; Y evaluates to minY
+        assertEquals(30.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("bbox (10.0, 20.0, 40.0, 30.0)")));
+        assertEquals(20.0, new GeoProcessor(GeoOperation.X).process(
+            new GeoShape("geometrycollection (point (20.0 10.0),point (1.0 2.0))")));
+        assertEquals(10.0, new GeoProcessor(GeoOperation.Y).process(
+            new GeoShape("geometrycollection (point (20.0 10.0),point (1.0 2.0))")));
+    }
+
+    public void testApplyGetXYZToPolygons() throws Exception {
+        assertEquals(3.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))")));
+        assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))")));
+        assertNull(new GeoProcessor(GeoOperation.Z).process(new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))")));
+        assertEquals(5.0, new GeoProcessor(GeoOperation.Z).process(
+            new GeoShape("polygon ((3.0 1.0 5.0, 4.0 2.0 6.0, 4.0 3.0 7.0, 3.0 1.0 5.0))")));
+        assertEquals(3.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("multipolygon (((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0)))")));
+        assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("multipolygon (((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0)))")));
+    }
+
+    public void testApplyNull() {
+        for (GeoOperation op : GeoOperation.values()) {
+            GeoProcessor proc = new GeoProcessor(op);
+            assertNull(proc.process(null));
+        }
+    }
+
+    public void testTypeCheck() {
+        GeoProcessor proc = new GeoProcessor(GeoOperation.ASWKT);
+        SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, () -> proc.process("string"));
+        assertEquals("A geo_point or geo_shape is required; received [string]", siae.getMessage());
+    }
+}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java
new file mode 100644
index 00000000000..9f78f8b3df4
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.expression.function.scalar.geo;
+
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
+import org.elasticsearch.xpack.sql.expression.function.scalar.Processors;
+import org.elasticsearch.xpack.sql.expression.gen.processor.ChainingProcessor;
+import org.elasticsearch.xpack.sql.expression.gen.processor.ConstantProcessor;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
+
+import static org.elasticsearch.xpack.sql.expression.function.scalar.FunctionTestUtils.l;
+import static org.elasticsearch.xpack.sql.tree.Source.EMPTY;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class StDistanceProcessorTests extends AbstractWireSerializingTestCase<StDistanceProcessor> {
+
+    public StDistanceProcessor createTestInstance() {
+        return new StDistanceProcessor(
+            constantPoint(randomDoubleBetween(-180, 180, true), randomDoubleBetween(-90, 90, true)),
+            constantPoint(randomDoubleBetween(-180, 180, true), randomDoubleBetween(-90, 90, true))
+        );
+    }
+
+    public static Processor constantPoint(double lon, double lat) {
+        return new ChainingProcessor(new ConstantProcessor("point (" + lon + " " + lat + ")"), StWkttosqlProcessor.INSTANCE);
+    }
+
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(Processors.getNamedWriteables());
+    }
+
+    public void testApply() {
+        StDistanceProcessor proc = new StDistanceProcessor(constantPoint(10, 20), constantPoint(30, 40));
+        Object result = proc.process(null);
+        assertThat(result, instanceOf(Double.class));
+        assertEquals(GeoUtils.arcDistance(20, 10, 40, 30), (double) result, 0.000001);
+    }
+
+    public void testNullHandling() {
+        assertNull(new StDistance(EMPTY, l(new GeoShape(1, 2)), l(null)).makePipe().asProcessor().process(null));
+        assertNull(new StDistance(EMPTY, l(null), l(new GeoShape(1, 2))).makePipe().asProcessor().process(null));
+    }
+
+    public void testTypeCheck() {
+        SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class,
+            () -> new StDistance(EMPTY, l("foo"), l(new GeoShape(1, 2))).makePipe().asProcessor().process(null));
+        assertEquals("A geo_point or geo_shape with type point is required; received [foo]", siae.getMessage());
+
+        siae = expectThrows(SqlIllegalArgumentException.class,
+            () -> new StDistance(EMPTY, l(new GeoShape(1, 2)), l("bar")).makePipe().asProcessor().process(null));
+        assertEquals("A geo_point or geo_shape with type point is required; received [bar]", siae.getMessage());
+    }
+
+    @Override
+    protected Writeable.Reader<StDistanceProcessor> instanceReader() {
+        return StDistanceProcessor::new;
+    }
+}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java
new file mode 100644
index 00000000000..fc7b33ae905
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements.
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import static org.hamcrest.Matchers.instanceOf; + +public class StWkttosqlProcessorTests extends ESTestCase { + public static StWkttosqlProcessor randomStWkttosqlProcessor() { + return new StWkttosqlProcessor(); + } + + public void testApply() { + StWkttosqlProcessor proc = new StWkttosqlProcessor(); + assertNull(proc.process(null)); + Object result = proc.process("POINT (10 20)"); + assertThat(result, instanceOf(GeoShape.class)); + GeoShape geoShape = (GeoShape) result; + assertEquals("point (10.0 20.0)", geoShape.toString()); + } + + public void testTypeCheck() { + StWkttosqlProcessor procPoint = new StWkttosqlProcessor(); + SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process(42)); + assertEquals("A string is required; received [42]", siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process("some random string")); + assertEquals("Cannot parse [some random string] as a geo_shape value", siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process("point (foo bar)")); + assertEquals("Cannot parse [point (foo bar)] as a geo_shape value", siae.getMessage()); + + + siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process("point (10 10")); + assertEquals("Cannot parse [point (10 10] as a geo_shape value", siae.getMessage()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index b2e5eebe5ea..fe922d26494 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfYear import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.IsoWeekOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MonthOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistance; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ACos; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ASin; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ATan; @@ -764,6 +765,15 @@ public class OptimizerTests extends ESTestCase { assertEquals(FIVE, nullEquals.right()); } + public void testLiteralsOnTheRightInStDistance() { + Alias a = new Alias(EMPTY, "a", L(10)); + Expression result = new BooleanLiteralsOnTheRight().rule(new StDistance(EMPTY, FIVE, a)); + assertTrue(result instanceof StDistance); + StDistance sd = (StDistance) result; + assertEquals(a, sd.left()); + assertEquals(FIVE, sd.right()); + } + public void testBoolSimplifyNotIsNullAndNotIsNotNull() { BooleanSimplification simplification = new BooleanSimplification(); assertTrue(simplification.rule(new Not(EMPTY, new IsNull(EMPTY, ONE))) instanceof IsNotNull); diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index bb4fb02ea7e..4069d750fa6 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -56,7 +56,7 @@ public class SysColumnsTests extends ESTestCase { SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, randomValueOtherThanMany(Mode::isDriver, () -> randomFrom(Mode.values()))); // nested fields are ignored - assertEquals(13, rows.size()); + assertEquals(15, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -143,7 +143,7 @@ public class SysColumnsTests extends ESTestCase { List> rows = new ArrayList<>(); SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, Mode.ODBC); - assertEquals(13, rows.size()); + assertEquals(15, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -232,7 +232,7 @@ public class SysColumnsTests extends ESTestCase { assertEquals(Short.class, nullable(row).getClass()); assertEquals(Short.class, sqlDataType(row).getClass()); assertEquals(Short.class, sqlDataTypeSub(row).getClass()); - + row = rows.get(9); assertEquals("some.ambiguous", name(row)); assertEquals((short) Types.VARCHAR, sqlType(row)); @@ -278,7 +278,7 @@ public class SysColumnsTests extends ESTestCase { List> rows = new ArrayList<>(); SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, Mode.JDBC); - assertEquals(13, rows.size()); + assertEquals(15, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -462,7 +462,7 @@ public class SysColumnsTests extends ESTestCase { public void testSysColumnsWithCatalogWildcard() throws Exception { executeCommand("SYS COLUMNS CATALOG 'cluster' TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { - assertEquals(13, r.size()); + assertEquals(14, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); assertEquals("test", r.column(2)); assertEquals("bool", r.column(3)); @@ -475,7 +475,7 @@ public class SysColumnsTests extends ESTestCase { public void testSysColumnsWithMissingCatalog() throws Exception { executeCommand("SYS COLUMNS TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { - assertEquals(13, r.size()); + assertEquals(14, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); assertEquals("test", r.column(2)); assertEquals("bool", r.column(3)); @@ -488,7 +488,7 @@ public class SysColumnsTests extends ESTestCase { public void testSysColumnsWithNullCatalog() throws Exception { executeCommand("SYS COLUMNS CATALOG ? 
TABLE LIKE 'test' LIKE '%'", singletonList(new SqlTypedParamValue("keyword", null)), r -> { - assertEquals(13, r.size()); + assertEquals(14, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); assertEquals("test", r.column(2)); assertEquals("bool", r.column(3)); @@ -528,4 +528,4 @@ public class SysColumnsTests extends ESTestCase { SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); return new Tuple<>(cmd, session); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java index 4a8da68a1d5..805268dd5b6 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java @@ -48,7 +48,7 @@ public class SysTypesTests extends ESTestCase { "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", "INTERVAL_HOUR_TO_MINUTE", "INTERVAL_HOUR_TO_SECOND", "INTERVAL_MINUTE_TO_SECOND", - "UNSUPPORTED", "OBJECT", "NESTED"); + "GEO_SHAPE", "GEO_POINT", "UNSUPPORTED", "OBJECT", "NESTED"); cmd.execute(null, wrap(r -> { assertEquals(19, r.columnCount()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 0543e65d4ae..693840bd65c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.sql.querydsl.agg.AggFilter; import org.elasticsearch.xpack.sql.querydsl.agg.GroupByDateHistogram; import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; import org.elasticsearch.xpack.sql.querydsl.query.ExistsQuery; +import org.elasticsearch.xpack.sql.querydsl.query.GeoDistanceQuery; import org.elasticsearch.xpack.sql.querydsl.query.NotQuery; import org.elasticsearch.xpack.sql.querydsl.query.Query; import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery; @@ -65,6 +66,7 @@ import static org.elasticsearch.xpack.sql.expression.function.scalar.math.MathPr import static org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation.PI; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.startsWith; public class QueryTranslatorTests extends ESTestCase { @@ -496,7 +498,7 @@ public class QueryTranslatorTests extends ESTestCase { assertNull(translation.query); AggFilter aggFilter = translation.aggFilter; assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.gt(InternalSqlScriptUtils." 
+ - operation.name().toLowerCase(Locale.ROOT) + "(params.a0),params.v0))", + operation.name().toLowerCase(Locale.ROOT) + "(params.a0),params.v0))", aggFilter.scriptTemplate().toString()); assertThat(aggFilter.scriptTemplate().params().toString(), startsWith("[{a=max(int){a->")); assertThat(aggFilter.scriptTemplate().params().toString(), endsWith(", {v=10}]")); @@ -561,6 +563,109 @@ public class QueryTranslatorTests extends ESTestCase { assertThat(aggFilter.scriptTemplate().params().toString(), endsWith(", {v=10}]")); } + public void testTranslateStAsWktForPoints() { + LogicalPlan p = plan("SELECT ST_AsWKT(point) FROM test WHERE ST_AsWKT(point) = 'point (10 20)'"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, true); + assertNull(translation.query); + AggFilter aggFilter = translation.aggFilter; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.eq(" + + "InternalSqlScriptUtils.stAswkt(InternalSqlScriptUtils.geoDocValue(doc,params.v0))," + + "params.v1)" + + ")", + aggFilter.scriptTemplate().toString()); + assertEquals("[{v=point}, {v=point (10 20)}]", aggFilter.scriptTemplate().params().toString()); + } + + public void testTranslateStWktToSql() { + LogicalPlan p = plan("SELECT shape FROM test WHERE ST_WKTToSQL(keyword) = ST_WKTToSQL('point (10 20)')"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, true); + assertNull(translation.query); + AggFilter aggFilter = translation.aggFilter; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(" + + "InternalSqlScriptUtils.eq(InternalSqlScriptUtils.stWktToSql(" + + "InternalSqlScriptUtils.docValue(doc,params.v0)),InternalSqlScriptUtils.stWktToSql(params.v1)))", + aggFilter.scriptTemplate().toString()); + assertEquals("[{v=keyword}, {v=point (10.0 20.0)}]", aggFilter.scriptTemplate().params().toString()); + } + + public void testTranslateStDistanceToScript() { + String operator = randomFrom(">", ">="); + String operatorFunction = operator.equalsIgnoreCase(">") ? "gt" : "gte"; + LogicalPlan p = plan("SELECT shape FROM test WHERE ST_Distance(point, ST_WKTToSQL('point (10 20)')) " + operator + " 20"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertTrue(translation.query instanceof ScriptQuery); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(" + + "InternalSqlScriptUtils." 
+ operatorFunction + "(" + + "InternalSqlScriptUtils.stDistance(" + + "InternalSqlScriptUtils.geoDocValue(doc,params.v0),InternalSqlScriptUtils.stWktToSql(params.v1)),params.v2))", + sc.script().toString()); + assertEquals("[{v=point}, {v=point (10.0 20.0)}, {v=20}]", sc.script().params().toString()); + } + + public void testTranslateStDistanceToQuery() { + String operator = randomFrom("<", "<="); + LogicalPlan p = plan("SELECT shape FROM test WHERE ST_Distance(point, ST_WKTToSQL('point (10 20)')) " + operator + " 25"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertTrue(translation.query instanceof GeoDistanceQuery); + GeoDistanceQuery gq = (GeoDistanceQuery) translation.query; + assertEquals("point", gq.field()); + assertEquals(20.0, gq.lat(), 0.00001); + assertEquals(10.0, gq.lon(), 0.00001); + assertEquals(25.0, gq.distance(), 0.00001); + } + + public void testTranslateStXY() { + String dim = randomFrom("X", "Y"); + LogicalPlan p = plan("SELECT ST_AsWKT(point) FROM test WHERE ST_" + dim + "(point) = 10"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertThat(translation.query, instanceOf(ScriptQuery.class)); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.eq(InternalSqlScriptUtils.st" + dim + "(" + + "InternalSqlScriptUtils.geoDocValue(doc,params.v0)),params.v1))", + sc.script().toString()); + assertEquals("[{v=point}, {v=10}]", sc.script().params().toString()); + } + + public void testTranslateStGeometryType() { + LogicalPlan p = plan("SELECT ST_AsWKT(point) FROM test WHERE ST_GEOMETRYTYPE(point) = 'POINT'"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertThat(translation.query, instanceOf(ScriptQuery.class)); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.eq(InternalSqlScriptUtils.stGeometryType(" + + "InternalSqlScriptUtils.geoDocValue(doc,params.v0)),params.v1))", + sc.script().toString()); + assertEquals("[{v=point}, {v=POINT}]", sc.script().params().toString()); + } + public void testTranslateCoalesce_GroupBy_Painless() { LogicalPlan p = plan("SELECT COALESCE(int, 10) FROM test GROUP BY 1"); assertTrue(p instanceof Aggregate); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java index 65b491fe71a..997de6e2f5c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java @@ -170,8 +170,11 @@ public class TypesTests extends ESTestCase { public 
void testGeoField() { Map mapping = loadMapping("mapping-geo.json"); - EsField dt = mapping.get("location"); - assertThat(dt.getDataType().typeName, is("unsupported")); + assertThat(mapping.size(), is(2)); + EsField gp = mapping.get("location"); + assertThat(gp.getDataType().typeName, is("geo_point")); + EsField gs = mapping.get("site"); + assertThat(gs.getDataType().typeName, is("geo_shape")); } public void testIpField() { diff --git a/x-pack/plugin/sql/src/test/resources/mapping-geo.json b/x-pack/plugin/sql/src/test/resources/mapping-geo.json index 3c958ff37ed..e6e499ef82e 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-geo.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-geo.json @@ -2,6 +2,9 @@ "properties" : { "location" : { "type" : "geo_point" + }, + "site": { + "type" : "geo_shape" } } } diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json index d93633f7ace..c75ecfdc845 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json @@ -44,6 +44,8 @@ } } }, - "foo_type" : { "type" : "foo" } + "foo_type" : { "type" : "foo" }, + "point": {"type" : "geo_point"}, + "shape": {"type" : "geo_shape"} } } diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json index 448c50e6a9f..e46d64a45e8 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json @@ -6,6 +6,7 @@ "keyword" : { "type" : "keyword" }, "unsupported" : { "type" : "ip_range" }, "date" : { "type" : "date"}, + "shape": { "type" : "geo_shape" }, "some" : { "properties" : { "dotted" : { From a8aa818e0046a55d6a04f993726080dc45814880 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 15 May 2019 08:11:55 -0400 Subject: [PATCH 45/67] Cacheability improvements for thirdparty audit task (#42085) (#42151) --- .../gradle/precommit/ThirdPartyAuditTask.java | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java index 8ec979420c0..e73a9d1e585 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java @@ -28,11 +28,13 @@ import org.gradle.api.artifacts.Dependency; import org.gradle.api.file.FileTree; import org.gradle.api.specs.Spec; import org.gradle.api.tasks.CacheableTask; +import org.gradle.api.tasks.Classpath; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFile; import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Optional; -import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.OutputFile; import org.gradle.api.tasks.PathSensitive; import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; @@ -45,6 +47,7 @@ import java.io.IOException; import java.net.URISyntaxException; import java.net.URL; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; import java.util.Arrays; import java.util.Collections; import java.util.Set; @@ -113,7 +116,7 @@ public 
class ThirdPartyAuditTask extends DefaultTask { this.javaHome = javaHome; } - @OutputDirectory + @Internal public File getJarExpandDir() { return new File( new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"), @@ -121,6 +124,11 @@ public class ThirdPartyAuditTask extends DefaultTask { ); } + @OutputFile + public File getSuccessMarker() { + return new File(getProject().getBuildDir(), "markers/" + getName()); + } + public void ignoreMissingClasses(String... classesOrPackages) { if (classesOrPackages.length == 0) { missingClassExcludes = null; @@ -157,8 +165,7 @@ public class ThirdPartyAuditTask extends DefaultTask { return missingClassExcludes; } - @InputFiles - @PathSensitive(PathSensitivity.NAME_ONLY) + @Classpath @SkipWhenEmpty public Set getJarsToScan() { // These are SelfResolvingDependency, and some of them backed by file collections, like the Gradle API files, @@ -241,6 +248,10 @@ public class ThirdPartyAuditTask extends DefaultTask { } assertNoJarHell(jdkJarHellClasses); + + // Mark successful third party audit check + getSuccessMarker().getParentFile().mkdirs(); + Files.write(getSuccessMarker().toPath(), new byte[]{}); } private void logForbiddenAPIsOutput(String forbiddenApisOutput) { From 15fd233ae36c11c7c48aaa7b76e2837e80124080 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 15 May 2019 09:26:04 -0400 Subject: [PATCH 46/67] Minor cluster coordination docs fixes (#42111) Fixes a typo and a badly-formatted warning. --- docs/reference/modules/discovery/discovery-settings.asciidoc | 2 +- docs/reference/setup/important-settings/heap-size.asciidoc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/modules/discovery/discovery-settings.asciidoc b/docs/reference/modules/discovery/discovery-settings.asciidoc index aaa39c9db62..0c23e771089 100644 --- a/docs/reference/modules/discovery/discovery-settings.asciidoc +++ b/docs/reference/modules/discovery/discovery-settings.asciidoc @@ -39,7 +39,7 @@ Discovery and cluster formation are also affected by the following _expert-level_ settings, although it is not recommended to change any of these from their default values. -[WARNING] If you adjust these settings then your cluster may not form correctly +WARNING: If you adjust these settings then your cluster may not form correctly or may become unstable or intolerant of certain failures. `discovery.cluster_formation_warning_timeout`:: diff --git a/docs/reference/setup/important-settings/heap-size.asciidoc b/docs/reference/setup/important-settings/heap-size.asciidoc index 890a9786e09..37e417e086e 100644 --- a/docs/reference/setup/important-settings/heap-size.asciidoc +++ b/docs/reference/setup/important-settings/heap-size.asciidoc @@ -10,7 +10,7 @@ Elasticsearch will assign the entire heap specified in heap size) settings. You should set these two settings to be equal to each other. -The value for these setting depends on the amount of RAM available on your +The value for these settings depends on the amount of RAM available on your server: * Set `Xmx` and `Xms` to no more than 50% of your physical RAM. 
{es} requires From 2f8c5ac6f878125a79ef0955047a019c6717d183 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Wed, 15 May 2019 10:50:54 -0400 Subject: [PATCH 47/67] Docs: Mark SQL Geo functionality as beta (#42138) Adds beta marker to geosql documentation --- docs/reference/sql/functions/geo.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc index f5ed716eaeb..112ddfffce6 100644 --- a/docs/reference/sql/functions/geo.asciidoc +++ b/docs/reference/sql/functions/geo.asciidoc @@ -3,6 +3,8 @@ [[sql-functions-geo]] === Geo Functions +beta[] + The geo functions work with geometries stored in `geo_point` and `geo_shape` fields, or returned by other geo functions. ==== Limitations From 9191b02213f2217ac97b199c82683088daa9f5be Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Thu, 16 May 2019 03:53:34 +1000 Subject: [PATCH 48/67] Enforce transport TLS on Basic with Security (#42150) If a basic license enables security, then we should also enforce TLS on the transport interface. This was already the case for Standard/Gold/Platinum licenses. For Basic, security defaults to disabled, so some of the process around checking whether security is actuallY enabled is more complex now that we need to account for basic licenses. --- .../org/elasticsearch/license/License.java | 18 --- .../elasticsearch/license/LicenseService.java | 5 +- .../license/XPackLicenseState.java | 25 +++- .../core/ssl/TLSLicenseBootstrapCheck.java | 10 +- .../ssl/TLSLicenseBootstrapCheckTests.java | 123 ++++++++++++++---- .../xpack/security/Security.java | 15 ++- .../xpack/security/SecurityTests.java | 46 +++++-- 7 files changed, 178 insertions(+), 64 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index 62ffd76e8ea..e39b5b7dcc1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -780,22 +780,4 @@ public class License implements ToXContentObject { } } - /** - * Returns true iff the license is a production licnese - */ - public boolean isProductionLicense() { - switch (operationMode()) { - case MISSING: - case TRIAL: - case BASIC: - return false; - case STANDARD: - case GOLD: - case PLATINUM: - return true; - default: - throw new AssertionError("unknown operation mode: " + operationMode()); - - } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 837caf2da07..f750d1349a0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -218,10 +218,13 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste } } + // This check would be incorrect if "basic" licenses were allowed here + // because the defaults there mean that security can be "off", even if the setting is "on" + // BUT basic licenses are explicitly excluded earlier in this method, so we don't need to worry if (XPackSettings.SECURITY_ENABLED.get(settings)) { // TODO we should really validate that all nodes have xpack installed and are consistently configured but this // should happen on a different level and not in this code - if (newLicense.isProductionLicense() + if 
(XPackLicenseState.isTransportTlsRequired(newLicense, settings) && XPackSettings.TRANSPORT_SSL_ENABLED.get(settings) == false && isProductionMode(settings, clusterService.localNode())) { // security is on but TLS is not configured we gonna fail the entire request and throw an exception diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 131069d27f6..e206ed3db51 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -282,7 +282,7 @@ public class XPackLicenseState { public XPackLicenseState(Settings settings) { this.listeners = new CopyOnWriteArrayList<>(); this.isSecurityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); - this.isSecurityExplicitlyEnabled = isSecurityEnabled && settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()); + this.isSecurityExplicitlyEnabled = isSecurityEnabled && isSecurityExplicitlyEnabled(settings); } private XPackLicenseState(XPackLicenseState xPackLicenseState) { @@ -292,6 +292,10 @@ public class XPackLicenseState { this.status = xPackLicenseState.status; } + private static boolean isSecurityExplicitlyEnabled(Settings settings) { + return settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()); + } + /** * Updates the current state of the license, which will change what features are available. * @@ -727,6 +731,25 @@ public class XPackLicenseState { return false; } + public static boolean isTransportTlsRequired(License license, Settings settings) { + if (license == null) { + return false; + } + switch (license.operationMode()) { + case STANDARD: + case GOLD: + case PLATINUM: + return XPackSettings.SECURITY_ENABLED.get(settings); + case BASIC: + return XPackSettings.SECURITY_ENABLED.get(settings) && isSecurityExplicitlyEnabled(settings); + case MISSING: + case TRIAL: + return false; + default: + throw new AssertionError("unknown operation mode [" + license.operationMode() + "]"); + } + } + private static boolean isSecurityEnabled(final OperationMode mode, final boolean isSecurityExplicitlyEnabled, final boolean isSecurityEnabled) { switch (mode) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java index 6f6592bbdfc..a042aeb4a23 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java @@ -9,6 +9,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.XPackSettings; /** @@ -19,10 +20,11 @@ public final class TLSLicenseBootstrapCheck implements BootstrapCheck { public BootstrapCheckResult check(BootstrapContext context) { if (XPackSettings.TRANSPORT_SSL_ENABLED.get(context.settings()) == false) { License license = LicenseService.getLicense(context.metaData()); - if (license != null && license.isProductionLicense()) { - return BootstrapCheckResult.failure("Transport SSL must be enabled for setups with production licenses. 
Please set " + - "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting [xpack.security.enabled] " + - "to [false]"); + if (XPackLicenseState.isTransportTlsRequired(license, context.settings())) { + return BootstrapCheckResult.failure("Transport SSL must be enabled if security is enabled on a [" + + license.operationMode().description() + "] license. " + + "Please set [xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]"); } } return BootstrapCheckResult.success(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java index ac73418800c..3cb14180930 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java @@ -5,40 +5,115 @@ */ package org.elasticsearch.xpack.core.ssl; +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.License; +import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.license.TestUtils; import org.elasticsearch.test.AbstractBootstrapCheckTestCase; -import java.util.EnumSet; - public class TLSLicenseBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - public void testBootstrapCheck() throws Exception { + public void testBootstrapCheckOnEmptyMetadata() { assertTrue(new TLSLicenseBootstrapCheck().check(emptyContext).isSuccess()); assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext(Settings.builder().put("xpack.security.transport.ssl.enabled" - , randomBoolean()).build(), MetaData.EMPTY_META_DATA)).isSuccess()); - int numIters = randomIntBetween(1,10); - for (int i = 0; i < numIters; i++) { - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); - EnumSet productionModes = EnumSet.of(License.OperationMode.GOLD, License.OperationMode.PLATINUM, - License.OperationMode.STANDARD); - MetaData.Builder builder = MetaData.builder(); - TestUtils.putLicense(builder, license); - MetaData build = builder.build(); - if (productionModes.contains(license.operationMode()) == false) { - assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", true).build(), build)).isSuccess()); - } else { - assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", false).build(), build)).isFailure()); - assertEquals("Transport SSL must be enabled for setups with production licenses. 
Please set " + - "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + - "[xpack.security.enabled] to [false]", - new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", false).build(), build)).getMessage()); - } + , randomBoolean()).build(), MetaData.EMPTY_META_DATA)).isSuccess()); + } + + public void testBootstrapCheckFailureOnPremiumLicense() throws Exception { + final OperationMode mode = randomFrom(OperationMode.PLATINUM, OperationMode.GOLD, OperationMode.STANDARD); + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.transport.ssl.enabled", false); + } + if (randomBoolean()) { + // randomise between default-true & explicit-true + settings.put("xpack.security.enabled", true); + } + + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(mode, settings); + assertTrue("Expected bootstrap failure", result.isFailure()); + assertEquals("Transport SSL must be enabled if security is enabled on a [" + mode.description() + "] license. Please set " + + "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]", + result.getMessage()); + } + + public void testBootstrapCheckSucceedsWithTlsEnabledOnPremiumLicense() throws Exception { + final OperationMode mode = randomFrom(OperationMode.PLATINUM, OperationMode.GOLD, OperationMode.STANDARD); + final Settings.Builder settings = Settings.builder().put("xpack.security.transport.ssl.enabled", true); + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(mode, settings); + assertSuccess(result); + } + + public void testBootstrapCheckFailureOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder().put("xpack.security.enabled", true); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.transport.ssl.enabled", false); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertTrue("Expected bootstrap failure", result.isFailure()); + assertEquals("Transport SSL must be enabled if security is enabled on a [basic] license. Please set " + + "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]", + result.getMessage()); + } + + public void testBootstrapSucceedsIfSecurityIsNotEnabledOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.enabled", false); + } + if (randomBoolean()) { + // it does not matter whether or not this is set, as security is not enabled. + settings.put("xpack.security.transport.ssl.enabled", randomBoolean()); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertSuccess(result); + } + + public void testBootstrapSucceedsIfTlsIsEnabledOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder().put("xpack.security.transport.ssl.enabled", true); + if (randomBoolean()) { + // it does not matter whether or not this is set, as TLS is enabled. 
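+                // (the bootstrap check can only fail when security is enabled while transport TLS is off)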
+ settings.put("xpack.security.enabled", randomBoolean()); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertSuccess(result); + } + + public void testBootstrapCheckAlwaysSucceedsOnTrialLicense() throws Exception { + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // it does not matter whether this is set, or to which value. + settings.put("xpack.security.enabled", randomBoolean()); + } + if (randomBoolean()) { + // it does not matter whether this is set, or to which value. + settings.put("xpack.security.transport.ssl.enabled", randomBoolean()); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.TRIAL, settings); + assertSuccess(result); + } + + public BootstrapCheck.BootstrapCheckResult runBootstrapCheck(OperationMode mode, Settings.Builder settings) throws Exception { + final License license = TestUtils.generateSignedLicense(mode.description(), TimeValue.timeValueHours(24)); + MetaData.Builder builder = MetaData.builder(); + TestUtils.putLicense(builder, license); + MetaData metaData = builder.build(); + final BootstrapContext context = createTestContext(settings.build(), metaData); + return new TLSLicenseBootstrapCheck().check(context); + } + + public void assertSuccess(BootstrapCheck.BootstrapCheckResult result) { + if (result.isFailure()) { + fail("Bootstrap check failed unexpectedly: " + result.getMessage()); } } + } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 569293af3b5..9ba3bdab21f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -258,8 +258,8 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.API_KEY_SERVICE_ENABLED_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; -import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_MAIN_TEMPLATE_7; public class Security extends Plugin implements ActionPlugin, IngestPlugin, NetworkPlugin, ClusterPlugin, @@ -985,7 +985,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw public BiConsumer getJoinValidator() { if (enabled) { return new ValidateTLSOnJoin(XPackSettings.TRANSPORT_SSL_ENABLED.get(settings), - DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)) + DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings), settings) .andThen(new ValidateUpgradedSecurityIndex()) .andThen(new ValidateLicenseCanBeDeserialized()) .andThen(new ValidateLicenseForFIPS(XPackSettings.FIPS_MODE_ENABLED.get(settings))); @@ -996,18 +996,21 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw static final class ValidateTLSOnJoin implements BiConsumer { private final boolean isTLSEnabled; private final String discoveryType; + private final Settings settings; - 
ValidateTLSOnJoin(boolean isTLSEnabled, String discoveryType) { + ValidateTLSOnJoin(boolean isTLSEnabled, String discoveryType, Settings settings) { this.isTLSEnabled = isTLSEnabled; this.discoveryType = discoveryType; + this.settings = settings; } @Override public void accept(DiscoveryNode node, ClusterState state) { License license = LicenseService.getLicense(state.metaData()); - if (license != null && license.isProductionLicense() && - isTLSEnabled == false && "single-node".equals(discoveryType) == false) { - throw new IllegalStateException("TLS setup is required for license type [" + license.operationMode().name() + "]"); + if (isTLSEnabled == false && "single-node".equals(discoveryType) == false + && XPackLicenseState.isTransportTlsRequired(license, settings)) { + throw new IllegalStateException("Transport TLS ([" + XPackSettings.TRANSPORT_SSL_ENABLED.getKey() + + "]) is required for license type [" + license.operationMode().description() + "] when security is enabled"); } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 47484dcbce2..2a2178a0bf7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -54,7 +54,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -67,9 +66,8 @@ import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.discovery.DiscoveryModule.ZEN2_DISCOVERY_TYPE; -import static org.elasticsearch.discovery.DiscoveryModule.ZEN_DISCOVERY_TYPE; -import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -254,17 +252,45 @@ public class SecurityTests extends ESTestCase { int numIters = randomIntBetween(1, 10); for (int i = 0; i < numIters; i++) { boolean tlsOn = randomBoolean(); - String discoveryType = randomFrom("single-node", ZEN_DISCOVERY_TYPE, ZEN2_DISCOVERY_TYPE, randomAlphaOfLength(4)); - Security.ValidateTLSOnJoin validator = new Security.ValidateTLSOnJoin(tlsOn, discoveryType); + boolean securityExplicitlyEnabled = randomBoolean(); + String discoveryType = randomFrom("single-node", ZEN2_DISCOVERY_TYPE, ZEN2_DISCOVERY_TYPE, randomAlphaOfLength(4)); + + final Settings settings; + if (securityExplicitlyEnabled) { + settings = Settings.builder().put("xpack.security.enabled", true).build(); + } else { + settings = Settings.EMPTY; + } + Security.ValidateTLSOnJoin validator = new Security.ValidateTLSOnJoin(tlsOn, discoveryType, settings); MetaData.Builder builder = MetaData.builder(); - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); + License.OperationMode licenseMode = randomFrom(License.OperationMode.values()); + License license = TestUtils.generateSignedLicense(licenseMode.description(), 
TimeValue.timeValueHours(24));
+            TestUtils.putLicense(builder, license);
             ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build();
-            EnumSet<License.OperationMode> productionModes = EnumSet.of(License.OperationMode.GOLD, License.OperationMode.PLATINUM,
-                License.OperationMode.STANDARD);
-            if (productionModes.contains(license.operationMode()) && tlsOn == false && "single-node".equals(discoveryType) == false) {
+
+            final boolean expectFailure;
+            switch (licenseMode) {
+                case PLATINUM:
+                case GOLD:
+                case STANDARD:
+                    expectFailure = tlsOn == false && "single-node".equals(discoveryType) == false;
+                    break;
+                case BASIC:
+                    expectFailure = tlsOn == false && "single-node".equals(discoveryType) == false && securityExplicitlyEnabled;
+                    break;
+                case MISSING:
+                case TRIAL:
+                    expectFailure = false;
+                    break;
+                default:
+                    throw new AssertionError("unknown operation mode [" + license.operationMode() + "]");
+            }
+            logger.info("Test TLS join; Lic:{} TLS:{} Disco:{} Settings:{} ; Expect Failure: {}",
+                licenseMode, tlsOn, discoveryType, settings.toDelimitedString(','), expectFailure);
+            if (expectFailure) {
                 IllegalStateException ise = expectThrows(IllegalStateException.class, () -> validator.accept(node, state));
-                assertEquals("TLS setup is required for license type [" + license.operationMode().name() + "]", ise.getMessage());
+                assertEquals("Transport TLS ([xpack.security.transport.ssl.enabled]) is required for license type [" +
+                    license.operationMode().description() + "] when security is enabled", ise.getMessage());
             } else {
                 validator.accept(node, state);
             }

From 7473742e6eb053f812d809576c78bdf80476e422 Mon Sep 17 00:00:00 2001
From: Marios Trivyzas
Date: Wed, 15 May 2019 16:06:03 -0400
Subject: [PATCH 49/67] SQL: Fix issue regarding INTERVAL * number (#42014)

Interval * integer number is a valid operation which was previously
supported only for foldables (literals), not when a field was involved.
That was because:

1. There was no common type returned for that combination
2. `BinaryArithmeticOperation` permitted the multiplication (called by
   fold()) but `BinaryArithmeticProcessor` didn't allow it

Moreover, the error message for invalid arithmetic operations was wrong
because of an issue with the overloaded methods of
`LoggerMessageFormat.format`.
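For illustration, a minimal JDBC sketch of the shape that now works
against a field (the connection URL is an assumption; the test_emp index
and languages field mirror the new intervalAndFieldMultiply spec test):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class IntervalTimesFieldExample {
        public static void main(String[] args) throws Exception {
            // assumes a local cluster and the Elasticsearch SQL JDBC driver on the classpath
            try (Connection con = DriverManager.getConnection("jdbc:es://localhost:9200");
                 Statement st = con.createStatement();
                 ResultSet rs = st.executeQuery(
                     "SELECT languages * INTERVAL '1 10:30' DAY TO MINUTES FROM test_emp LIMIT 5")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1)); // e.g. +2 21:00:00.0
                }
            }
        }
    }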
Fixes: #41239 Fixes: #41200 (cherry picked from commit 91039bab12d3ef27d6eac9cdc891a3b3ad0c694d) --- .../main/resources/datetime-interval.csv-spec | 20 ++++++++++++++++++ .../arithmetic/BinaryArithmeticProcessor.java | 6 +++--- .../DateTimeArithmeticOperation.java | 8 ++++++- .../predicate/operator/arithmetic/Mul.java | 2 +- .../predicate/operator/arithmetic/Sub.java | 4 ++++ .../xpack/sql/type/DataTypeConversion.java | 11 ++++++++++ .../analyzer/VerifierErrorMessagesTests.java | 21 +++++++++++++++++++ .../sql/type/DataTypeConversionTests.java | 4 ++++ 8 files changed, 71 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec index 8d9a65d1b85..bfb28775bc3 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec @@ -182,6 +182,26 @@ SELECT -2 * INTERVAL '1 23:45' DAY TO MINUTES AS result; -3 23:30:00.0 ; +intervalHoursMultiply +SELECT 4 * -INTERVAL '2' HOURS AS result1, -5 * -INTERVAL '3' HOURS AS result2; + result1 | result2 +---------------+-------------- +-0 08:00:00.0 | +0 15:00:00.0 +; + +intervalAndFieldMultiply +schema::languages:byte|result:string +SELECT languages, CAST (languages * INTERVAL '1 10:30' DAY TO MINUTES AS string) AS result FROM test_emp ORDER BY emp_no LIMIT 5; + + languages | result +---------------+--------------------------------------------- +2 | +2 21:00:00.0 +5 | +7 04:30:00.0 +4 | +5 18:00:00.0 +5 | +7 04:30:00.0 +1 | +1 10:30:00.0 +; + dateMinusInterval SELECT CAST('2018-05-13T12:34:56' AS DATETIME) - INTERVAL '2-8' YEAR TO MONTH AS result; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java index b6bfaa4acb6..5705bb4d85a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java @@ -164,7 +164,7 @@ public class BinaryArithmeticProcessor extends FunctionalBinaryProcessor Date: Tue, 14 May 2019 07:45:14 -0400 Subject: [PATCH 50/67] Adjust load and timeout in testShrinkIndexPrimaryTerm (#42098) This test can create and shuffle 2*(3*5*7) = 210 shards which is quite heavy for our CI. This commit reduces the load, so we don't timeout on CI. 
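As a standalone sketch of the constraint the new bound encodes, namely
that a shrink target is valid only when it evenly divides the source
shard count (class and method names below are illustrative, not part of
the change):

    import java.util.ArrayList;
    import java.util.List;

    class ShrinkTargets {
        // e.g. a 12-shard index may shrink to 6, 4, 3, 2 or 1 shards
        static List<Integer> validShrinkTargets(int sourceShards) {
            List<Integer> targets = new ArrayList<>();
            for (int target = 1; target < sourceShards; target++) {
                if (sourceShards % target == 0) { // mirrors the numberOfShards % n != 0 rejection
                    targets.add(target);
                }
            }
            return targets;
        }
    }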
Closes #28153 --- .../action/admin/indices/create/ShrinkIndexIT.java | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index feeb9646e40..b14bdd0ed98 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -65,7 +65,6 @@ import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; -import java.util.List; import java.util.Map; import java.util.stream.IntStream; @@ -165,11 +164,8 @@ public class ShrinkIndexIT extends ESIntegTestCase { } public void testShrinkIndexPrimaryTerm() throws Exception { - final List factors = Arrays.asList(2, 3, 5, 7); - final List numberOfShardsFactors = randomSubsetOf(scaledRandomIntBetween(1, factors.size() - 1), factors); - final int numberOfShards = numberOfShardsFactors.stream().reduce(1, (x, y) -> x * y); - final int numberOfTargetShards = randomSubsetOf(randomInt(numberOfShardsFactors.size() - 1), numberOfShardsFactors) - .stream().reduce(1, (x, y) -> x * y); + int numberOfShards = randomIntBetween(2, 20); + int numberOfTargetShards = randomValueOtherThanMany(n -> numberOfShards % n != 0, () -> randomIntBetween(1, numberOfShards - 1)); internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get(); @@ -218,7 +214,7 @@ public class ShrinkIndexIT extends ESIntegTestCase { final Settings.Builder prepareShrinkSettings = Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true); client().admin().indices().prepareUpdateSettings("source").setSettings(prepareShrinkSettings).get(); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards final IndexMetaData indexMetaData = indexMetaData(client(), "source"); final long beforeShrinkPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetaData::primaryTerm).max().getAsLong(); @@ -228,7 +224,7 @@ public class ShrinkIndexIT extends ESIntegTestCase { Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", numberOfTargetShards).build(); assertAcked(client().admin().indices().prepareResizeIndex("source", "target").setSettings(shrinkSettings).get()); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); final IndexMetaData afterShrinkIndexMetaData = indexMetaData(client(), "target"); for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { From 6ffc6ea42e101050624a4a5f36e2bfd61c417c67 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 15 May 2019 18:13:04 -0400 Subject: [PATCH 51/67] Don't verify evictions in testFilterCacheStats (#42091) If a background merge and refresh happens after a search but before a stats query, then evictions will be non-zero. 
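The surviving assertions, roughly (a sketch mirroring the updated test;
only counters that grow monotonically are safe to check):

    // inside an ESIntegTestCase, as in IndexStatsIT
    assertBusy(() -> {
        IndicesStatsResponse stats =
            client().admin().indices().prepareStats("index").setQueryCache(true).get();
        assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L));
        assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L));
        // evictions are deliberately not asserted: a background merge followed by
        // a refresh can close old segment readers and evict their cached entries
    });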
Closes #32506 --- .../java/org/elasticsearch/indices/stats/IndexStatsIT.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a3697af50b0..59e7c21a3e6 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -62,7 +62,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.ArrayList; @@ -1008,8 +1007,6 @@ public class IndexStatsIT extends ESIntegTestCase { assertEquals(total, shardTotal); } - @TestLogging("_root:DEBUG") // this fails at a very low rate on CI: https://github.com/elastic/elasticsearch/issues/32506 - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/32506") public void testFilterCacheStats() throws Exception { Settings settings = Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build(); assertAcked(prepareCreate("index").setSettings(settings).get()); @@ -1034,7 +1031,6 @@ public class IndexStatsIT extends ESIntegTestCase { IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(stats); assertThat(stats.getTotal().queryCache.getHitCount(), equalTo(0L)); - assertThat(stats.getTotal().queryCache.getEvictions(), equalTo(0L)); assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L)); assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L)); }); @@ -1045,7 +1041,6 @@ public class IndexStatsIT extends ESIntegTestCase { IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(stats); assertThat(stats.getTotal().queryCache.getHitCount(), greaterThan(0L)); - assertThat(stats.getTotal().queryCache.getEvictions(), equalTo(0L)); assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L)); assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L)); }); From 8681dd9cba2260237480d6aea4479a4449a9e9e6 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 16 May 2019 09:48:57 -0400 Subject: [PATCH 52/67] Hide bwc build output on success (#42102) Previously we used LoggedExec for running the internal bwc builds. However, this had bad performance implications as all the output was buffered into memory, thus we changed back to normal Exec. This commit adds a `spoolOutput` setting to LoggedExec which can be used for commands with large amounts of output, and switches the bwc builds to use this flag. 
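The flag can be set from plugin code as well as from a build script. A
minimal Java sketch, where the task name and command are illustrative:

    import org.elasticsearch.gradle.LoggedExec;
    import org.gradle.api.Plugin;
    import org.gradle.api.Project;

    // Sketch: register an exec task whose output is spooled to
    // build/buffered-output/<taskName> instead of being buffered in memory.
    public class SpoolingExecPlugin implements Plugin<Project> {
        @Override
        public void apply(Project project) {
            project.getTasks().create("runBwcBuild", LoggedExec.class, task -> {
                task.setSpoolOutput(true); // the flag added by this commit
                task.setWorkingDir(project.getRootDir());
                task.setCommandLine("./gradlew", "assemble"); // illustrative command
            });
        }
    }

On failure the task replays the spooled file line by line through the error
logger, so nothing accumulates on the heap while the command runs.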
--- .../gradle/LazyFileOutputStream.java | 67 +++++++++++++++++ .../org/elasticsearch/gradle/LoggedExec.java | 72 +++++++++++++------ distribution/bwc/build.gradle | 3 +- 3 files changed, 118 insertions(+), 24 deletions(-) create mode 100644 buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java new file mode 100644 index 00000000000..d3101868e84 --- /dev/null +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; + +/** + * An outputstream to a File that is lazily opened on the first write. + */ +class LazyFileOutputStream extends OutputStream { + private OutputStream delegate; + + LazyFileOutputStream(File file) { + // use an initial dummy delegate to avoid doing a conditional on every write + this.delegate = new OutputStream() { + private void bootstrap() throws IOException { + file.getParentFile().mkdirs(); + delegate = new FileOutputStream(file); + } + @Override + public void write(int b) throws IOException { + bootstrap(); + delegate.write(b); + } + @Override + public void write(byte b[], int off, int len) throws IOException { + bootstrap(); + delegate.write(b, off, len); + } + }; + } + + @Override + public void write(int b) throws IOException { + delegate.write(b); + } + + @Override + public void write(byte b[], int off, int len) throws IOException { + delegate.write(b, off, len); + } + + @Override + public void close() throws IOException { + delegate.close(); + } +} diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java index 8dd59170039..a3f87572932 100644 --- a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java @@ -3,14 +3,22 @@ package org.elasticsearch.gradle; import org.gradle.api.Action; import org.gradle.api.GradleException; import org.gradle.api.Project; +import org.gradle.api.logging.Logger; import org.gradle.api.tasks.Exec; +import org.gradle.api.tasks.Internal; import org.gradle.process.BaseExecSpec; import org.gradle.process.ExecResult; import org.gradle.process.ExecSpec; import org.gradle.process.JavaExecSpec; import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; import 
java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.function.Consumer; import java.util.function.Function; /** @@ -19,37 +27,55 @@ import java.util.function.Function; @SuppressWarnings("unchecked") public class LoggedExec extends Exec { + private Consumer outputLogger; + public LoggedExec() { - ByteArrayOutputStream output = new ByteArrayOutputStream(); - ByteArrayOutputStream error = new ByteArrayOutputStream(); + if (getLogger().isInfoEnabled() == false) { - setStandardOutput(output); - setErrorOutput(error); setIgnoreExitValue(true); - doLast((unused) -> { - if (getExecResult().getExitValue() != 0) { - try { - getLogger().error("Standard output:"); - getLogger().error(output.toString("UTF-8")); - getLogger().error("Standard error:"); - getLogger().error(error.toString("UTF-8")); - } catch (UnsupportedEncodingException e) { - throw new GradleException("Failed to read exec output", e); - } - throw new GradleException( - String.format( - "Process '%s %s' finished with non-zero exit value %d", - getExecutable(), - getArgs(), - getExecResult().getExitValue() - ) - ); + setSpoolOutput(false); + doLast(task -> { + if (getExecResult().getExitValue() != 0) { + try { + getLogger().error("Output for " + getExecutable() + ":"); + outputLogger.accept(getLogger()); + } catch (Exception e) { + throw new GradleException("Failed to read exec output", e); } + throw new GradleException( + String.format( + "Process '%s %s' finished with non-zero exit value %d", + getExecutable(), + getArgs(), + getExecResult().getExitValue() + ) + ); } - ); + }); } } + @Internal + public void setSpoolOutput(boolean spoolOutput) { + final OutputStream out; + if (spoolOutput) { + File spoolFile = new File(getProject().getBuildDir() + "/buffered-output/" + this.getName()); + out = new LazyFileOutputStream(spoolFile); + outputLogger = logger -> { + try { + Files.lines(spoolFile.toPath()).forEach(logger::error); + } catch (IOException e) { + throw new RuntimeException("could not log", e); + } + }; + } else { + out = new ByteArrayOutputStream(); + outputLogger = logger -> logger.error(((ByteArrayOutputStream) getStandardOutput()).toString(StandardCharsets.UTF_8)); + } + setStandardOutput(out); + setErrorOutput(out); + } + public static ExecResult exec(Project project, Action action) { return genericExec(project, project::exec, action); } diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 8285d8dae2b..87644fb7f67 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -121,8 +121,9 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased } Closure createRunBwcGradleTask = { name, extraConfig -> - return tasks.create(name: "$name", type: Exec) { + return tasks.create(name: "$name", type: LoggedExec) { dependsOn checkoutBwcBranch, writeBuildMetadata + spoolOutput = true workingDir = checkoutDir doFirst { // Execution time so that the checkouts are available From fa1d1d1f577d4674b0214753b3c1481e1a2b3e4c Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 16 May 2019 09:52:13 -0400 Subject: [PATCH 53/67] Deprecate the native realm migration tool (#42142) The migrate tool was added when the native realm was created, to aid users in converting from file realms that were per node, into the cluster managed native realm. While this tool was useful at the time, users should now be using the native realm directly. This commit deprecates the tool, to be removed in a followup for 8.0. 
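With the added println every invocation now leads with the notice; the
expected terminal output when the tool starts (both lines are verbatim from
the change below, invocation details elided):

    Warning: The migrate tool is deprecated. Use the native realm directly instead of file realms.
    starting migration of users and roles...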
--- docs/reference/commands/migrate-tool.asciidoc | 2 ++ .../xpack/security/authc/esnative/ESNativeRealmMigrateTool.java | 1 + 2 files changed, 3 insertions(+) diff --git a/docs/reference/commands/migrate-tool.asciidoc b/docs/reference/commands/migrate-tool.asciidoc index a1903ac69da..2c2f4abf433 100644 --- a/docs/reference/commands/migrate-tool.asciidoc +++ b/docs/reference/commands/migrate-tool.asciidoc @@ -3,6 +3,8 @@ [[migrate-tool]] == elasticsearch-migrate +deprecated:[7.2.0, "This tool is deprecated. Use the native realm directly."] + The `elasticsearch-migrate` command migrates existing file-based users and roles to the native realm. From 5.0 onward, you should use the `native` realm to manage roles and local users. diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java index 6368f4a7510..0fbe54d7c10 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java @@ -133,6 +133,7 @@ public class ESNativeRealmMigrateTool extends LoggingAwareMultiCommand { // Visible for testing @Override public void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + terminal.println("Warning: The migrate tool is deprecated. Use the native realm directly instead of file realms."); terminal.println("starting migration of users and roles..."); importUsers(terminal, env, options); importRoles(terminal, env, options); From 4063701f5e52935488dad16ba085febfdf6e2fbf Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Thu, 16 May 2019 10:10:23 -0400 Subject: [PATCH 54/67] [DOCS] add a warning about bypassing PUT API's, update example responses (#42062) Configurations are stored in the .data-frame-internal-1 index, but users should not add configurations directly to the index as additional information to enable access control is added. This adds a warning against allowing access to the internal index. 
--- .../apis/get-transform-stats.asciidoc | 36 +++++++++++-------- .../data-frames/apis/get-transform.asciidoc | 21 ++++++++--- .../data-frames/apis/put-transform.asciidoc | 14 +++++--- 3 files changed, 48 insertions(+), 23 deletions(-) diff --git a/docs/reference/data-frames/apis/get-transform-stats.asciidoc b/docs/reference/data-frames/apis/get-transform-stats.asciidoc index 85e5001b13a..09c383f2494 100644 --- a/docs/reference/data-frames/apis/get-transform-stats.asciidoc +++ b/docs/reference/data-frames/apis/get-transform-stats.asciidoc @@ -65,27 +65,35 @@ The API returns the following results: { "id" : "ecommerce_transform", "state" : { + "task_state" : "started", "indexer_state" : "started", - "task_state": "started", - "current_position" : { - "customer_id" : "9" - }, - "generation" : 1 + "checkpoint" : 1, + "progress" : { + "total_docs" : 1220, + "docs_remaining" : 0, + "percent_complete" : 100.0 + } }, "stats" : { - "pages_processed" : 0, - "documents_processed" : 0, - "documents_indexed" : 0, - "trigger_count" : 0, - "index_time_in_ms" : 0, - "index_total" : 0, + "pages_processed" : 2, + "documents_processed" : 1220, + "documents_indexed" : 13, + "trigger_count" : 1, + "index_time_in_ms" : 19, + "index_total" : 1, "index_failures" : 0, - "search_time_in_ms" : 0, - "search_total" : 0, + "search_time_in_ms" : 52, + "search_total" : 2, "search_failures" : 0 + }, + "checkpointing" : { + "current" : { + "timestamp_millis" : 1557474786393 + }, + "operations_behind" : 0 } } ] } ---- -// TESTRESPONSE \ No newline at end of file +// TESTRESPONSE diff --git a/docs/reference/data-frames/apis/get-transform.asciidoc b/docs/reference/data-frames/apis/get-transform.asciidoc index 85e56aa21cd..e2b5c5eccb7 100644 --- a/docs/reference/data-frames/apis/get-transform.asciidoc +++ b/docs/reference/data-frames/apis/get-transform.asciidoc @@ -75,10 +75,20 @@ The API returns the following results: "transforms" : [ { "id" : "ecommerce_transform", - "source" : "kibana_sample_data_ecommerce", - "dest" : "kibana_sample_data_ecommerce_transform", - "query" : { - "match_all" : { } + "source" : { + "index" : [ + "kibana_sample_data_ecommerce" + ], + "query" : { + "term" : { + "geoip.continent_name" : { + "value" : "Asia" + } + } + } + }, + "dest" : { + "index" : "kibana_sample_data_ecommerce_transform" }, "pivot" : { "group_by" : { @@ -95,7 +105,8 @@ The API returns the following results: } } } - } + }, + "description" : "Maximum priced ecommerce data by customer_id in Asia" } ] } diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index 222d93dfe42..f452c38ab4c 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -15,7 +15,13 @@ Instantiates a {dataframe-transform}. `PUT _data_frame/transforms/` -//===== Description +===== Description + +IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. + Do not put a {dataframe-transform} directly into any + `.data-frame-internal*` indices using the Elasticsearch index API. + If {es} {security-features} are enabled, do not give users any + privileges on `.data-frame-internal*` indices. ==== Path Parameters @@ -27,12 +33,12 @@ Instantiates a {dataframe-transform}. ==== Request Body -`source`:: (object) The source configuration, consisting of `index` and optionally +`source` (required):: (object) The source configuration, consisting of `index` and optionally a `query`. 
-`dest`:: (object) The destination configuration, consisting of `index`. +`dest` (required):: (object) The destination configuration, consisting of `index`. -`pivot`:: Defines the pivot function `group by` fields and the aggregation to +`pivot`:: (object) Defines the pivot function `group by` fields and the aggregation to reduce the data. `description`:: Optional free text description of the data frame transform From 226df35d96f1a9357d25a1b124fd01cbbb822350 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 16 May 2019 13:11:23 -0400 Subject: [PATCH 55/67] [ML] Improve message misformation error in file structure finder (#42175) This change replaces the extremely unfriendly message "Number of messages analyzed must be positive" in the case where the sample lines were incorrectly grouped into just one message to an error that more helpfully explains the likely root cause of the problem. --- .../TextLogFileStructureFinder.java | 6 ++++++ .../TextLogFileStructureFinderTests.java | 21 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java index b476e3e4654..36e5e91b432 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java @@ -76,6 +76,12 @@ public class TextLogFileStructureFinder implements FileStructureFinder { } // Don't add the last message, as it might be partial and mess up subsequent pattern finding + if (sampleMessages.isEmpty()) { + throw new IllegalArgumentException("Failed to create more than one message from the sample lines provided. (The " + + "last is discarded in case the sample is incomplete.) 
If your sample does contain multiple messages the " + + "problem is probably that the primary timestamp format has been incorrectly detected, so try overriding it."); + } + FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.SEMI_STRUCTURED_TEXT) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java index 7ed5518c650..6cf4d61cf17 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java @@ -232,6 +232,27 @@ public class TextLogFileStructureFinderTests extends FileStructureTestCase { "\\[%{JAVACLASS:class} *\\] %{JAVALOGMESSAGE:message}] does not match sample messages", e.getMessage()); } + public void testErrorOnIncorrectMessageFormation() { + + // This sample causes problems because the (very weird) primary timestamp format + // is not detected but a secondary format that only occurs in one line is detected + String sample = "Day 21 Month 1 Year 2019 11:04 INFO [localhost] - starting\n" + + "Day 21 Month 1 Year 2019 11:04 INFO [localhost] - startup date [Mon Jan 21 11:04:19 CET 2019]\n" + + "Day 21 Month 1 Year 2019 11:04 DEBUG [localhost] - details\n" + + "Day 21 Month 1 Year 2019 11:04 DEBUG [localhost] - more details\n" + + "Day 21 Month 1 Year 2019 11:04 WARN [localhost] - something went wrong\n"; + + String charset = randomFrom(POSSIBLE_CHARSETS); + Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> factory.createFromSample(explanation, sample, charset, hasByteOrderMarker, FileStructureOverrides.EMPTY_OVERRIDES, + NOOP_TIMEOUT_CHECKER)); + + assertEquals("Failed to create more than one message from the sample lines provided. (The last is discarded in " + + "case the sample is incomplete.) If your sample does contain multiple messages the problem is probably that " + + "the primary timestamp format has been incorrectly detected, so try overriding it.", e.getMessage()); + } + public void testCreateMultiLineMessageStartRegexGivenNoPrefaces() { for (TimestampFormatFinder.CandidateTimestampFormat candidateTimestampFormat : TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS) { String simpleDateRegex = candidateTimestampFormat.simplePattern.pattern(); From 51376f98a731c722786976cabb41706d02d11e9f Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 16 May 2019 13:36:09 -0400 Subject: [PATCH 56/67] Clarify rolling upgrade fallback to restart upgrade (#42161) Adds a note that restarting half-or-more of the master-eligible nodes means you're no longer doing a rolling upgrade, and may need to upgrade all the things before the cluster returns to health. 
--- .../upgrade/rolling_upgrade.asciidoc | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index e7a9ca09baa..789851ac7cf 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -167,14 +167,22 @@ include::open-ml.asciidoc[] During a rolling upgrade, the cluster continues to operate normally. However, any new functionality is disabled or operates in a backward compatible mode -until all nodes in the cluster are upgraded. New functionality -becomes operational once the upgrade is complete and all nodes are running the -new version. Once that has happened, there's no way to return to operating -in a backward compatible mode. Nodes running the previous major version will -not be allowed to join the fully-updated cluster. +until all nodes in the cluster are upgraded. New functionality becomes +operational once the upgrade is complete and all nodes are running the new +version. Once that has happened, there's no way to return to operating in a +backward compatible mode. Nodes running the previous major version will not be +allowed to join the fully-updated cluster. In the unlikely case of a network malfunction during the upgrade process that -isolates all remaining old nodes from the cluster, you must take the -old nodes offline and upgrade them to enable them to join the cluster. +isolates all remaining old nodes from the cluster, you must take the old nodes +offline and upgrade them to enable them to join the cluster. + +If you stop half or more of the master-eligible nodes all at once during the +upgrade then the cluster will become unavailable, meaning that the upgrade is +no longer a _rolling_ upgrade. If this happens, you should upgrade and restart +all of the stopped master-eligible nodes to allow the cluster to form again, as +if performing a <>. It may also +be necessary to upgrade all of the remaining old nodes before they can join the +cluster after it re-forms. ==================================================== From c40bd31073a5c721bf7bd4d112cd7a46f377de2c Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 16 May 2019 15:35:27 -0400 Subject: [PATCH 57/67] Use local outputstream reference (#42180) This commit fixes the logging in LoggedExec which uses an in memory buffer to read from a local reference, instead of with getStandardOutput() of the Exec task. This is due to gradle internally wrapping with a TeeOutputStream, breaking our cast. 
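The pitfall generalizes: once a framework wraps a stream you handed it, the
getter no longer returns your concrete type. A self-contained sketch of the
failing cast versus the local-reference fix, where the wrapper merely stands
in for Gradle's TeeOutputStream:

    import java.io.ByteArrayOutputStream;
    import java.io.FilterOutputStream;
    import java.io.OutputStream;

    public class CastPitfallSketch {
        // Stand-in for the wrapper Gradle places around a task's standard output.
        static class TeeLikeWrapper extends FilterOutputStream {
            TeeLikeWrapper(OutputStream delegate) { super(delegate); }
        }

        public static void main(String[] args) {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            OutputStream standardOutput = new TeeLikeWrapper(out); // what the getter now returns

            // (ByteArrayOutputStream) standardOutput;  // would throw ClassCastException

            // Reading through the local reference, as this commit does, still works:
            System.out.println("captured " + out.size() + " bytes");
        }
    }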
--- .../minimumRuntime/org/elasticsearch/gradle/LoggedExec.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java index a3f87572932..233431ff3c4 100644 --- a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java @@ -70,7 +70,7 @@ public class LoggedExec extends Exec { }; } else { out = new ByteArrayOutputStream(); - outputLogger = logger -> logger.error(((ByteArrayOutputStream) getStandardOutput()).toString(StandardCharsets.UTF_8)); + outputLogger = logger -> logger.error(((ByteArrayOutputStream) out).toString(StandardCharsets.UTF_8)); } setStandardOutput(out); setErrorOutput(out); From a6e63e6fa8868da4138b18df62e3f5a4768ed6d0 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 16 May 2019 15:38:15 -0400 Subject: [PATCH 58/67] Protect logged exec spooling from no output (#42177) This commit adds a guard around reading the spooled LoggedExec output. It is possible the exec command did not output anything, and failed, which would trigger a failure to read the output file. --- .../minimumRuntime/org/elasticsearch/gradle/LoggedExec.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java index 233431ff3c4..c71b7ba1835 100644 --- a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java @@ -63,7 +63,10 @@ public class LoggedExec extends Exec { out = new LazyFileOutputStream(spoolFile); outputLogger = logger -> { try { - Files.lines(spoolFile.toPath()).forEach(logger::error); + // the file may not exist if the command never output anything + if (Files.exists(spoolFile.toPath())) { + Files.lines(spoolFile.toPath()).forEach(logger::error); + } } catch (IOException e) { throw new RuntimeException("could not log", e); } From ab7a7062ea1401c3e02223f5677e5d7b63137c54 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 17 May 2019 14:49:29 -0400 Subject: [PATCH 59/67] Make packaging tests use jdk downloader (#42097) This commit removes the jdk11 download in vagrant provisioning and converts it to using the jdk downloader for the system jdk, and sets up a separate jdk for use by the test (which will be converted to running gradle in a followup). 
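The registration performed by the Groovy below can be sketched in Java
against the same container; this assumes Jdk exposes the version and
platform setters implied by the Groovy property assignments:

    import org.elasticsearch.gradle.Jdk;
    import org.elasticsearch.gradle.JdkDownloadPlugin;
    import org.gradle.api.NamedDomainObjectContainer;
    import org.gradle.api.Plugin;
    import org.gradle.api.Project;

    // Sketch: apply the downloader plugin and register a Linux JDK for the VM tests.
    public class PackagingJdkSketch implements Plugin<Project> {
        @Override
        @SuppressWarnings("unchecked")
        public void apply(Project project) {
            project.getPluginManager().apply(JdkDownloadPlugin.class);
            NamedDomainObjectContainer<Jdk> jdks =
                    (NamedDomainObjectContainer<Jdk>) project.getExtensions().getByName("jdks");
            jdks.create("linux_gradle", jdk -> {
                jdk.setVersion("12.0.1+12@69cfe15208a647278a19ef0990eea691"); // GRADLE_JDK_VERSION below
                jdk.setPlatform("linux"); // setters assumed from the Groovy assignments
            });
        }
    }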
--- Vagrantfile | 2 +- .../gradle/vagrant/BatsOverVagrantTask.groovy | 6 +-- .../gradle/vagrant/VagrantTestPlugin.groovy | 43 ++++++++++++++++--- 3 files changed, 40 insertions(+), 11 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 1acf4fe819b..14f6ad00f3a 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -72,7 +72,7 @@ Vagrant.configure(2) do |config| config.vm.box = 'elastic/debian-8-x86_64' deb_common config, box, extra: <<-SHELL # this sometimes gets a bad ip, and doesn't appear to be needed - rm /etc/apt/sources.list.d/http_debian_net_debian.list + rm -f /etc/apt/sources.list.d/http_debian_net_debian.list SHELL end end diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy index 110f2fc7e84..af5d328dc0c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy @@ -27,15 +27,15 @@ import org.gradle.api.tasks.Input public class BatsOverVagrantTask extends VagrantCommandTask { @Input - String remoteCommand + Object remoteCommand BatsOverVagrantTask() { command = 'ssh' } - void setRemoteCommand(String remoteCommand) { + void setRemoteCommand(Object remoteCommand) { this.remoteCommand = Objects.requireNonNull(remoteCommand) - setArgs(['--command', remoteCommand]) + setArgs((Iterable) ['--command', remoteCommand]) } @Override diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 30a8052b3f3..0fe45c4321e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -1,11 +1,18 @@ package org.elasticsearch.gradle.vagrant import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.BwcVersions import org.elasticsearch.gradle.FileContentsTask +import org.elasticsearch.gradle.Jdk +import org.elasticsearch.gradle.JdkDownloadPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.BwcVersions -import org.gradle.api.* +import org.gradle.api.GradleException +import org.gradle.api.InvalidUserDataException +import org.gradle.api.NamedDomainObjectContainer +import org.gradle.api.Plugin +import org.gradle.api.Project +import org.gradle.api.Task import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.execution.TaskExecutionAdapter import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency @@ -15,6 +22,8 @@ import org.gradle.api.tasks.Exec import org.gradle.api.tasks.StopExecutionException import org.gradle.api.tasks.TaskState +import java.nio.file.Paths + import static java.util.Collections.unmodifiableList class VagrantTestPlugin implements Plugin { @@ -85,8 +94,22 @@ class VagrantTestPlugin implements Plugin { /** extra env vars to pass to vagrant for box configuration **/ Map vagrantBoxEnvVars = [:] + private static final String GRADLE_JDK_VERSION = "12.0.1+12@69cfe15208a647278a19ef0990eea691" + private Jdk linuxGradleJdk; + private Jdk windowsGradleJdk; + @Override void apply(Project project) { + project.pluginManager.apply(JdkDownloadPlugin.class) + NamedDomainObjectContainer jdksContainer = (NamedDomainObjectContainer) 
project.getExtensions().getByName("jdks"); + linuxGradleJdk = jdksContainer.create("linux_gradle") { + version = GRADLE_JDK_VERSION + platform = "linux" + } + windowsGradleJdk = jdksContainer.create("windows_gradle") { + version = GRADLE_JDK_VERSION + platform = "windows" + } collectAvailableBoxes(project) @@ -264,7 +287,7 @@ class VagrantTestPlugin implements Plugin { } } - private static void createPrepareVagrantTestEnvTask(Project project) { + private void createPrepareVagrantTestEnvTask(Project project) { File packagingDir = new File(project.buildDir, PACKAGING_CONFIGURATION) File archivesDir = new File(packagingDir, 'archives') @@ -280,7 +303,7 @@ class VagrantTestPlugin implements Plugin { } Task createLinuxRunnerScript = project.tasks.create('createLinuxRunnerScript', FileContentsTask) { - dependsOn copyPackagingTests + dependsOn copyPackagingTests, linuxGradleJdk file "${testsDir}/run-tests.sh" contents """\ if [ "\$#" -eq 0 ]; then @@ -288,11 +311,12 @@ class VagrantTestPlugin implements Plugin { else test_args=( "\$@" ) fi - java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}" + + "${-> convertPath(project, linuxGradleJdk.toString()) }"/bin/java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}" """ } Task createWindowsRunnerScript = project.tasks.create('createWindowsRunnerScript', FileContentsTask) { - dependsOn copyPackagingTests + dependsOn copyPackagingTests, windowsGradleJdk file "${testsDir}/run-tests.ps1" // the use of $args rather than param() here is deliberate because the syntax for array (multivalued) parameters is likely // a little trappy for those unfamiliar with powershell @@ -302,7 +326,7 @@ class VagrantTestPlugin implements Plugin { } else { \$testArgs = \$args } - java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs + & "${-> convertPath(project, windowsGradleJdk.toString()) }"/bin/java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs exit \$LASTEXITCODE """ } @@ -617,4 +641,9 @@ class VagrantTestPlugin implements Plugin { } } } + + // convert the given path from an elasticsearch repo path to a VM path + private String convertPath(Project project, String path) { + return "/elasticsearch/" + project.rootDir.toPath().relativize(Paths.get(path)); + } } From 076ca75ea52309d4be43fdf407e994fb5086f623 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 17 May 2019 15:59:34 -0400 Subject: [PATCH 60/67] SQL: Suppress geo tests failing on tr-TR locale (#42200) Due to a bug in JTS WKT parser, JTS cannot parse most of WKT shapes if the shape type is written in the lower case. For examples `point (1 2)` is causing JTS inside H2GIS to fail on tr-TR locale as a result of case-insensitive comparison. 
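The root cause is the locale-sensitive default of String.toUpperCase():
Turkish upper-cases a lowercase i to the dotted capital İ (U+0130) rather
than I, so the case-normalized keyword no longer matches. A minimal
demonstration:

    import java.util.Locale;

    public class TurkishLocaleSketch {
        public static void main(String[] args) {
            Locale turkish = new Locale("tr", "TR");
            System.out.println("point".toUpperCase(turkish));                     // POİNT
            System.out.println("point".toUpperCase(turkish).equals("POINT"));     // false
            // Locale.ROOT gives parsers a locale-independent result:
            System.out.println("point".toUpperCase(Locale.ROOT).equals("POINT")); // true
        }
    }

This is exactly the condition the assumeTrue guard below checks before
running the geo spec tests.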
--- .../org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java index 405efac5cac..ec97cab6f10 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java @@ -31,6 +31,8 @@ public abstract class GeoSqlSpecTestCase extends SpecBaseIntegrationTestCase { @ClassRule public static LocalH2 H2 = new LocalH2((c) -> { + assumeTrue("JTS inside H2 is using default local for toUpperCase() in string comparison making it fail to parse WKT on certain" + + " locales", "point".toUpperCase(Locale.getDefault()).equals("POINT")); // Load GIS extensions H2GISFunctions.load(c); c.createStatement().execute("RUNSCRIPT FROM 'classpath:/ogc/sqltsch.sql'"); From f2447364fd34228d19551e1989f2f7155caae184 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 17 May 2019 16:51:05 -0400 Subject: [PATCH 61/67] [ML] adds geo_centroid aggregation support to data frames (#42088) (#42094) --- .../integration/DataFramePivotRestIT.java | 50 +++++++++++++++++++ .../integration/DataFrameRestTestCase.java | 8 ++- .../pivot/AggregationResultUtils.java | 3 ++ .../transforms/pivot/Aggregations.java | 1 + .../transforms/pivot/AggregationsTests.java | 6 ++- 5 files changed, 66 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 75e179e5dee..9169ad5d93f 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -418,6 +418,56 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { assertEquals(3.878048780, actual.doubleValue(), 0.000001); } + public void testPivotWithGeoCentroidAgg() throws Exception { + String transformId = "geoCentroidPivot"; + String dataFrameIndex = "geo_centroid_pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + + final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, + BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + + String config = "{" + + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," + + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + config += " \"pivot\": {" + + " \"group_by\": {" + + " \"reviewer\": {" + + " \"terms\": {" + + " \"field\": \"user_id\"" + + " } } }," + + " \"aggregations\": {" + + " \"avg_rating\": {" + + " \"avg\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"location\": {" + + " \"geo_centroid\": {\"field\": \"location\"}" + + " } } }" + + "}"; + + createDataframeTransformRequest.setJsonEntity(config); + Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); + assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + + startAndWaitForTransform(transformId, dataFrameIndex, 
BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(dataFrameIndex)); + + // we expect 27 documents as there shall be 27 user_id's + Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); + + // get and check some users + Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); + Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); + assertEquals(3.878048780, actual.doubleValue(), 0.000001); + String actualString = (String) ((List) XContentMapValues.extractValue("hits.hits._source.location", searchResult)).get(0); + String[] latlon = actualString.split(","); + assertEquals((4 + 10), Double.valueOf(latlon[0]), 0.000001); + assertEquals((4 + 15), Double.valueOf(latlon[1]), 0.000001); + } + private void assertOnePivotValue(String query, double expected) throws IOException { Map searchResult = getAsMap(query); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index 89047219f40..2de6a248858 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -76,6 +76,9 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { .startObject("stars") .field("type", "integer") .endObject() + .startObject("location") + .field("type", "geo_point") + .endObject() .endObject() .endObject(); } @@ -103,6 +106,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { min = 10 + (i % 49); } int sec = 10 + (i % 49); + String location = (user + 10) + "," + (user + 15); String date_string = "2017-01-" + day + "T" + hour + ":" + min + ":" + sec + "Z"; bulk.append("{\"user_id\":\"") @@ -113,7 +117,9 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { .append(business) .append("\",\"stars\":") .append(stars) - .append(",\"timestamp\":\"") + .append(",\"location\":\"") + .append(location) + .append("\",\"timestamp\":\"") .append(date_string) .append("\"}\n"); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java index 8c4fa96a144..f8857591b23 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java @@ -13,6 +13,7 @@ import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; +import org.elasticsearch.search.aggregations.metrics.GeoCentroid; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; import 
org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation.SingleValue; import org.elasticsearch.search.aggregations.metrics.ScriptedMetric; @@ -84,6 +85,8 @@ public final class AggregationResultUtils { } } else if (aggResult instanceof ScriptedMetric) { updateDocument(document, aggName, ((ScriptedMetric) aggResult).aggregation()); + } else if (aggResult instanceof GeoCentroid) { + updateDocument(document, aggName, ((GeoCentroid) aggResult).centroid().toString()); } else { // Execution should never reach this point! // Creating transforms with unsupported aggregations shall not be possible diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java index e7257c463ce..615c9b2e8d2 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java @@ -35,6 +35,7 @@ public final class Aggregations { MAX("max", SOURCE), MIN("min", SOURCE), SUM("sum", SOURCE), + GEO_CENTROID("geo_centroid", "geo_point"), SCRIPTED_METRIC("scripted_metric", DYNAMIC), BUCKET_SCRIPT("bucket_script", DYNAMIC); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java index 5fb8463ae54..8443699430a 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java @@ -38,11 +38,15 @@ public class AggregationsTests extends ESTestCase { assertEquals("double", Aggregations.resolveTargetMapping("sum", "double")); assertEquals("half_float", Aggregations.resolveTargetMapping("sum", "half_float")); + // geo_centroid + assertEquals("geo_point", Aggregations.resolveTargetMapping("geo_centroid", "geo_point")); + assertEquals("geo_point", Aggregations.resolveTargetMapping("geo_centroid", null)); + // scripted_metric assertEquals("_dynamic", Aggregations.resolveTargetMapping("scripted_metric", null)); assertEquals("_dynamic", Aggregations.resolveTargetMapping("scripted_metric", "int")); - // scripted_metric + // bucket_script assertEquals("_dynamic", Aggregations.resolveTargetMapping("bucket_script", null)); assertEquals("_dynamic", Aggregations.resolveTargetMapping("bucket_script", "int")); } From a68b04e47bf91aa7f301643017eaa5a2d145216e Mon Sep 17 00:00:00 2001 From: Ed Savage <32410745+edsavage@users.noreply.github.com> Date: Fri, 17 May 2019 16:37:52 -0400 Subject: [PATCH 62/67] [ML] Improve hard_limit audit message (#42086) Improve the hard_limit memory audit message by reporting how many bytes over the configured memory limit the job was at the point of the last allocation failure. Previously the model memory usage was reported, however this was inaccurate and hence of limited use - primarily because the total memory used by the model can decrease significantly after the models status is changed to hard_limit but before the model size stats are reported from autodetect to ES. 
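Given the two new fields, the improved audit line becomes a direct function
of the reported overshoot. A sketch of the construction; the wording here is
illustrative only, the real text lives in Messages.java in this patch:

    public class HardLimitAuditSketch {
        // Illustrative wording; parameter names match the new model size stats fields.
        static String hardLimitMessage(long modelBytesMemoryLimit, long modelBytesExceeded) {
            return "Job memory status changed to hard_limit; the job was "
                    + modelBytesExceeded + " bytes over the configured limit of "
                    + modelBytesMemoryLimit + " bytes at the last allocation failure";
        }

        public static void main(String[] args) {
            // 40960 / 10762 are the fixture values used in the tests below.
            System.out.println(hardLimitMessage(40960L, 10762L));
        }
    }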
While this PR contains the changes to the format of the hard_limit audit message it is dependent on modifications to the ml-cpp backend to send additional data fields in the model size stats message. These changes will follow in a subsequent PR. It is worth noting that this PR must be merged prior to the ml-cpp one, to keep CI tests happy. --- .../client/ml/job/process/ModelSizeStats.java | 53 +++++++++++--- .../client/MachineLearningGetResultsIT.java | 32 ++++++++- .../ml/job/process/ModelSizeStatsTests.java | 8 +++ .../xpack/core/ml/job/messages/Messages.java | 7 +- .../autodetect/state/ModelSizeStats.java | 70 +++++++++++++++++-- .../autodetect/state/ModelSizeStatsTests.java | 8 +++ .../output/AutoDetectResultProcessor.java | 8 ++- .../AutoDetectResultProcessorTests.java | 7 +- 8 files changed, 171 insertions(+), 22 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java index c9a34fe5c98..6ea3cede0e3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java @@ -47,6 +47,8 @@ public class ModelSizeStats implements ToXContentObject { * Field Names */ public static final ParseField MODEL_BYTES_FIELD = new ParseField("model_bytes"); + public static final ParseField MODEL_BYTES_EXCEEDED_FIELD = new ParseField("model_bytes_exceeded"); + public static final ParseField MODEL_BYTES_MEMORY_LIMIT_FIELD = new ParseField("model_bytes_memory_limit"); public static final ParseField TOTAL_BY_FIELD_COUNT_FIELD = new ParseField("total_by_field_count"); public static final ParseField TOTAL_OVER_FIELD_COUNT_FIELD = new ParseField("total_over_field_count"); public static final ParseField TOTAL_PARTITION_FIELD_COUNT_FIELD = new ParseField("total_partition_field_count"); @@ -61,6 +63,8 @@ public class ModelSizeStats implements ToXContentObject { static { PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); PARSER.declareLong(Builder::setModelBytes, MODEL_BYTES_FIELD); + PARSER.declareLong(Builder::setModelBytesExceeded, MODEL_BYTES_EXCEEDED_FIELD); + PARSER.declareLong(Builder::setModelBytesMemoryLimit, MODEL_BYTES_MEMORY_LIMIT_FIELD); PARSER.declareLong(Builder::setBucketAllocationFailuresCount, BUCKET_ALLOCATION_FAILURES_COUNT_FIELD); PARSER.declareLong(Builder::setTotalByFieldCount, TOTAL_BY_FIELD_COUNT_FIELD); PARSER.declareLong(Builder::setTotalOverFieldCount, TOTAL_OVER_FIELD_COUNT_FIELD); @@ -97,6 +101,8 @@ public class ModelSizeStats implements ToXContentObject { private final String jobId; private final long modelBytes; + private final Long modelBytesExceeded; + private final Long modelBytesMemoryLimit; private final long totalByFieldCount; private final long totalOverFieldCount; private final long totalPartitionFieldCount; @@ -105,11 +111,13 @@ public class ModelSizeStats implements ToXContentObject { private final Date timestamp; private final Date logTime; - private ModelSizeStats(String jobId, long modelBytes, long totalByFieldCount, long totalOverFieldCount, - long totalPartitionFieldCount, long bucketAllocationFailuresCount, MemoryStatus memoryStatus, - Date timestamp, Date logTime) { + private ModelSizeStats(String jobId, long modelBytes, Long modelBytesExceeded, Long modelBytesMemoryLimit, long totalByFieldCount, + long totalOverFieldCount, long 
totalPartitionFieldCount, long bucketAllocationFailuresCount, + MemoryStatus memoryStatus, Date timestamp, Date logTime) { this.jobId = jobId; this.modelBytes = modelBytes; + this.modelBytesExceeded = modelBytesExceeded; + this.modelBytesMemoryLimit = modelBytesMemoryLimit; this.totalByFieldCount = totalByFieldCount; this.totalOverFieldCount = totalOverFieldCount; this.totalPartitionFieldCount = totalPartitionFieldCount; @@ -126,6 +134,12 @@ public class ModelSizeStats implements ToXContentObject { builder.field(Job.ID.getPreferredName(), jobId); builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); builder.field(MODEL_BYTES_FIELD.getPreferredName(), modelBytes); + if (modelBytesExceeded != null) { + builder.field(MODEL_BYTES_EXCEEDED_FIELD.getPreferredName(), modelBytesExceeded); + } + if (modelBytesMemoryLimit != null) { + builder.field(MODEL_BYTES_MEMORY_LIMIT_FIELD.getPreferredName(), modelBytesMemoryLimit); + } builder.field(TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(), totalByFieldCount); builder.field(TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(), totalOverFieldCount); builder.field(TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName(), totalPartitionFieldCount); @@ -148,6 +162,14 @@ public class ModelSizeStats implements ToXContentObject { return modelBytes; } + public Long getModelBytesExceeded() { + return modelBytesExceeded; + } + + public Long getModelBytesMemoryLimit() { + return modelBytesMemoryLimit; + } + public long getTotalByFieldCount() { return totalByFieldCount; } @@ -188,8 +210,8 @@ public class ModelSizeStats implements ToXContentObject { @Override public int hashCode() { - return Objects.hash(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, - this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + return Objects.hash(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount, + totalPartitionFieldCount, this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); } /** @@ -207,7 +229,8 @@ public class ModelSizeStats implements ToXContentObject { ModelSizeStats that = (ModelSizeStats) other; - return this.modelBytes == that.modelBytes && this.totalByFieldCount == that.totalByFieldCount + return this.modelBytes == that.modelBytes && Objects.equals(this.modelBytesExceeded, that.modelBytesExceeded) + && Objects.equals(this.modelBytesMemoryLimit, that.modelBytesMemoryLimit) && this.totalByFieldCount == that.totalByFieldCount && this.totalOverFieldCount == that.totalOverFieldCount && this.totalPartitionFieldCount == that.totalPartitionFieldCount && this.bucketAllocationFailuresCount == that.bucketAllocationFailuresCount && Objects.equals(this.memoryStatus, that.memoryStatus) && Objects.equals(this.timestamp, that.timestamp) @@ -219,6 +242,8 @@ public class ModelSizeStats implements ToXContentObject { private final String jobId; private long modelBytes; + private Long modelBytesExceeded; + private Long modelBytesMemoryLimit; private long totalByFieldCount; private long totalOverFieldCount; private long totalPartitionFieldCount; @@ -236,6 +261,8 @@ public class ModelSizeStats implements ToXContentObject { public Builder(ModelSizeStats modelSizeStats) { this.jobId = modelSizeStats.jobId; this.modelBytes = modelSizeStats.modelBytes; + this.modelBytesExceeded = modelSizeStats.modelBytesExceeded; + this.modelBytesMemoryLimit = modelSizeStats.modelBytesMemoryLimit; this.totalByFieldCount = modelSizeStats.totalByFieldCount; this.totalOverFieldCount = 
modelSizeStats.totalOverFieldCount; this.totalPartitionFieldCount = modelSizeStats.totalPartitionFieldCount; @@ -250,6 +277,16 @@ public class ModelSizeStats implements ToXContentObject { return this; } + public Builder setModelBytesExceeded(long modelBytesExceeded) { + this.modelBytesExceeded = modelBytesExceeded; + return this; + } + + public Builder setModelBytesMemoryLimit(long modelBytesMemoryLimit) { + this.modelBytesMemoryLimit = modelBytesMemoryLimit; + return this; + } + public Builder setTotalByFieldCount(long totalByFieldCount) { this.totalByFieldCount = totalByFieldCount; return this; @@ -287,8 +324,8 @@ public class ModelSizeStats implements ToXContentObject { } public ModelSizeStats build() { - return new ModelSizeStats(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, - bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + return new ModelSizeStats(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount, + totalPartitionFieldCount, bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java index 092bc254f50..34ca5cd2aa4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java @@ -150,11 +150,15 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { private void addModelSnapshotIndexRequests(BulkRequest bulkRequest) { { + // Index a number of model snapshots, one of which contains the new model_size_stats fields + // 'model_bytes_exceeded' and 'model_bytes_memory_limit' that were introduced in 7.2.0. + // We want to verify that we can parse the snapshots whether or not these fields are present. 
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX); indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"timestamp\":1541587919000, " + "\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", \"snapshot_id\":\"1541587919\"," + "\"snapshot_doc_count\":1, \"model_size_stats\":{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"model_size_stats\"," + - "\"model_bytes\":51722, \"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," + + "\"model_bytes\":51722, \"model_bytes_exceeded\":10762, \"model_bytes_memory_limit\":40960, \"total_by_field_count\":3, " + + "\"total_over_field_count\":0, \"total_partition_field_count\":2," + "\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"log_time\":1541587919000," + " \"timestamp\":1519930800000},\"latest_record_time_stamp\":1519931700000, \"latest_result_time_stamp\":1519930800000," + " \"retain\":false }", XContentType.JSON); @@ -223,6 +227,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -241,6 +247,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -259,6 +267,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { assertThat(response.snapshots().get(2).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(2).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); 
assertThat(response.snapshots().get(2).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -288,6 +298,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { assertThat(response.snapshots().get(2).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(2).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -306,6 +318,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -324,6 +338,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -353,6 +369,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); 
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
@@ -383,6 +401,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
         assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
         assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
         assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
+        assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
+        assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
@@ -402,6 +422,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
         assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
         assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID));
         assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L));
+        assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
+        assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
         assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
         assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
         assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
@@ -430,6 +452,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
         assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
         assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
         assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
+        assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
+        assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
@@ -470,6 +494,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
         assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
         assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
         assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
+        assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L));
+        assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
@@ -488,6 +514,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
         assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
         assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID));
         assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L));
+        assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
+        assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
         assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
         assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
         assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
@@ -517,6 +545,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
         assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
         assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
         assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
+        assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
+        assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
         assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java
index 4a12a75f2b1..8c43feb545a 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java
@@ -31,6 +31,8 @@ public class ModelSizeStatsTests extends AbstractXContentTestCase<ModelSizeStats> {

         parser.declareString((modelSizeStat, s) -> {}, Result.RESULT_TYPE);
         parser.declareLong(Builder::setModelBytes, MODEL_BYTES_FIELD);
+        parser.declareLong(Builder::setModelBytesExceeded, MODEL_BYTES_EXCEEDED_FIELD);
+        parser.declareLong(Builder::setModelBytesMemoryLimit, MODEL_BYTES_MEMORY_LIMIT_FIELD);
         parser.declareLong(Builder::setBucketAllocationFailuresCount, BUCKET_ALLOCATION_FAILURES_COUNT_FIELD);
         parser.declareLong(Builder::setTotalByFieldCount, TOTAL_BY_FIELD_COUNT_FIELD);
         parser.declareLong(Builder::setTotalOverFieldCount, TOTAL_OVER_FIELD_COUNT_FIELD);
@@ -100,6 +105,8 @@ public class ModelSizeStats implements ToXContentObject, Writeable {

     private final String jobId;
     private final long modelBytes;
+    private final Long modelBytesExceeded;
+    private final Long modelBytesMemoryLimit;
     private final long totalByFieldCount;
     private final long totalOverFieldCount;
     private final long totalPartitionFieldCount;
@@ -108,11 +115,14 @@ public class ModelSizeStats implements ToXContentObject, Writeable {
     private final Date timestamp;
     private final Date logTime;

-    private ModelSizeStats(String jobId, long modelBytes, long totalByFieldCount, long totalOverFieldCount,
-                           long totalPartitionFieldCount, long bucketAllocationFailuresCount, MemoryStatus memoryStatus,
+    private ModelSizeStats(String jobId, long modelBytes, Long modelBytesExceeded, Long modelBytesMemoryLimit, long totalByFieldCount,
+                           long totalOverFieldCount, long totalPartitionFieldCount, long bucketAllocationFailuresCount,
+                           MemoryStatus memoryStatus,
                            Date timestamp, Date logTime) {
         this.jobId = jobId;
         this.modelBytes = modelBytes;
+        this.modelBytesExceeded = modelBytesExceeded;
+        this.modelBytesMemoryLimit = modelBytesMemoryLimit;
         this.totalByFieldCount = totalByFieldCount;
         this.totalOverFieldCount = totalOverFieldCount;
         this.totalPartitionFieldCount = totalPartitionFieldCount;
@@ -125,6 +135,16 @@ public class ModelSizeStats implements ToXContentObject, Writeable {
     public ModelSizeStats(StreamInput in) throws IOException {
         jobId = in.readString();
         modelBytes = in.readVLong();
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
+            modelBytesExceeded = in.readOptionalLong();
+        } else {
+            modelBytesExceeded = null;
+        }
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
+            modelBytesMemoryLimit = in.readOptionalLong();
+        } else {
+            modelBytesMemoryLimit = null;
+        }
         totalByFieldCount = in.readVLong();
         totalOverFieldCount = in.readVLong();
         totalPartitionFieldCount = in.readVLong();
@@ -146,6 +166,12 @@ public class ModelSizeStats implements ToXContentObject, Writeable {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(jobId);
         out.writeVLong(modelBytes);
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
+            out.writeOptionalLong(modelBytesExceeded);
+        }
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
+            out.writeOptionalLong(modelBytesMemoryLimit);
+        }
         out.writeVLong(totalByFieldCount);
         out.writeVLong(totalOverFieldCount);
         out.writeVLong(totalPartitionFieldCount);
@@ -171,6 +197,12 @@ public class ModelSizeStats implements ToXContentObject, Writeable {
         builder.field(Job.ID.getPreferredName(), jobId);
         builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE);
         builder.field(MODEL_BYTES_FIELD.getPreferredName(), modelBytes);
+        if (modelBytesExceeded != null) {
+            builder.field(MODEL_BYTES_EXCEEDED_FIELD.getPreferredName(), modelBytesExceeded);
+        }
+        if (modelBytesMemoryLimit != null) {
+            builder.field(MODEL_BYTES_MEMORY_LIMIT_FIELD.getPreferredName(), modelBytesMemoryLimit);
+        }
         builder.field(TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(), totalByFieldCount);
         builder.field(TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(), totalOverFieldCount);
         builder.field(TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName(), totalPartitionFieldCount);
@@ -192,6 +224,14 @@ public class ModelSizeStats implements ToXContentObject, Writeable {
         return modelBytes;
     }

+    public Long getModelBytesExceeded() {
+        return modelBytesExceeded;
+    }
+
+    public Long getModelBytesMemoryLimit() {
+        return modelBytesMemoryLimit;
+    }
+
     public long getTotalByFieldCount() {
         return totalByFieldCount;
     }
@@ -231,8 +271,8 @@ public class ModelSizeStats implements ToXContentObject, Writeable {
     @Override
     public int hashCode() {
         // this.id excluded here as it is generated by the datastore
-        return Objects.hash(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount,
-            this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime);
+        return Objects.hash(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount,
+            totalPartitionFieldCount, this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime);
     }

     /**
@@ -250,7 +290,9 @@ public class ModelSizeStats implements ToXContentObject, Writeable {

         ModelSizeStats that = (ModelSizeStats) other;

-        return this.modelBytes == that.modelBytes && this.totalByFieldCount == that.totalByFieldCount
+        return this.modelBytes == that.modelBytes && Objects.equals(this.modelBytesExceeded, that.modelBytesExceeded)
+            && Objects.equals(this.modelBytesMemoryLimit, that.modelBytesMemoryLimit)
+            && this.totalByFieldCount == that.totalByFieldCount
             && this.totalOverFieldCount == that.totalOverFieldCount && this.totalPartitionFieldCount == that.totalPartitionFieldCount
             && this.bucketAllocationFailuresCount == that.bucketAllocationFailuresCount
             && Objects.equals(this.memoryStatus, that.memoryStatus) && Objects.equals(this.timestamp, that.timestamp)
@@ -262,6 +304,8 @@ public class ModelSizeStats implements ToXContentObject, Writeable {

         private final String jobId;
         private long modelBytes;
+        private Long modelBytesExceeded;
+        private Long modelBytesMemoryLimit;
         private long totalByFieldCount;
         private long totalOverFieldCount;
         private long totalPartitionFieldCount;
@@ -279,6 +323,8 @@ public class ModelSizeStats implements ToXContentObject, Writeable {
         public Builder(ModelSizeStats modelSizeStats) {
             this.jobId = modelSizeStats.jobId;
             this.modelBytes = modelSizeStats.modelBytes;
+            this.modelBytesExceeded = modelSizeStats.modelBytesExceeded;
+            this.modelBytesMemoryLimit = modelSizeStats.modelBytesMemoryLimit;
             this.totalByFieldCount = modelSizeStats.totalByFieldCount;
             this.totalOverFieldCount = modelSizeStats.totalOverFieldCount;
             this.totalPartitionFieldCount = modelSizeStats.totalPartitionFieldCount;
@@ -293,6 +339,16 @@ public class ModelSizeStats implements ToXContentObject, Writeable {
             return this;
         }

+        public Builder setModelBytesExceeded(long modelBytesExceeded) {
+            this.modelBytesExceeded = modelBytesExceeded;
+            return this;
+        }
+
+        public Builder setModelBytesMemoryLimit(long modelBytesMemoryLimit) {
+            this.modelBytesMemoryLimit = modelBytesMemoryLimit;
+            return this;
+        }
+
         public Builder setTotalByFieldCount(long totalByFieldCount) {
             this.totalByFieldCount = totalByFieldCount;
             return this;
@@ -330,8 +386,8 @@ public class ModelSizeStats implements ToXContentObject, Writeable {
         }

         public ModelSizeStats build() {
-            return new ModelSizeStats(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount,
-                bucketAllocationFailuresCount, memoryStatus, timestamp, logTime);
+            return new ModelSizeStats(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount,
+                totalPartitionFieldCount, bucketAllocationFailuresCount, memoryStatus, timestamp, logTime);
         }
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java
index e66fea90f04..90e4bacc3f8 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java
@@ -22,6 +22,8 @@ public class ModelSizeStatsTests extends AbstractSerializingTestCase<ModelSizeStats> {
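Editor's note: the version checks in the `ModelSizeStats` diff above follow the usual Elasticsearch wire-compatibility pattern — an optional field is only read or written when the other node's version knows about it, so mixed-version clusters keep working during a rolling upgrade. A minimal sketch of that pattern, using a hypothetical `ExampleStats` class (not part of this change) and the same `readOptionalLong`/`writeOptionalLong` helpers the diff uses:

[source,java]
--------------------------------------------------
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

// Hypothetical class illustrating version-gated optional serialization.
public class ExampleStats implements Writeable {

    private final long alwaysPresent;  // serialized on every version
    private final Long addedIn720;     // nullable; on the wire only from 7.2.0

    public ExampleStats(StreamInput in) throws IOException {
        alwaysPresent = in.readVLong();
        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
            // The sender is 7.2.0 or later, so the optional value is on the wire.
            addedIn720 = in.readOptionalLong();
        } else {
            // Older senders never wrote the field; treat it as absent.
            addedIn720 = null;
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(alwaysPresent);
        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
            // Only send the optional value to nodes that can read it.
            out.writeOptionalLong(addedIn720);
        }
    }
}
--------------------------------------------------

The read and write sides must be gated on exactly the same version, as they are in the patch; an asymmetric check would desynchronize the stream and corrupt every field read after it.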
Date: Sun, 19 May 2019 08:29:50 -0400
Subject: [PATCH 63/67] [ML] Temporarily muting failing tests

Muting a number of AutodetectMemoryLimitIT tests to give CI a chance to
settle before easing in required backend changes.

relates elastic/ml-cpp#486
relates #42086
---
 .../xpack/ml/integration/AutodetectMemoryLimitIT.java  | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java
index 03860ea9ae0..2f005914205 100644
--- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java
+++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java
@@ -84,6 +84,7 @@ public class AutodetectMemoryLimitIT extends MlNativeAutodetectIntegTestCase {
         assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT));
     }

+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42207")
     public void testTooManyByFields() throws Exception {
         Detector.Builder detector = new Detector.Builder("count", null);
         detector.setByFieldName("user");
@@ -129,6 +130,7 @@ public class AutodetectMemoryLimitIT extends MlNativeAutodetectIntegTestCase {
         assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT));
     }

+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42207")
     public void testTooManyByAndOverFields() throws Exception {
         Detector.Builder detector = new Detector.Builder("count", null);
         detector.setByFieldName("department");
@@ -178,6 +180,7 @@ public class AutodetectMemoryLimitIT extends MlNativeAutodetectIntegTestCase {
         assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT));
     }

+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42207")
     public void testManyDistinctOverFields() throws Exception {
         Detector.Builder detector = new Detector.Builder("sum", "value");
         detector.setOverFieldName("user");

From 1362944c23694fe7df8da50d36f95c1b0144e404 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Sun, 19 May 2019 20:43:41 -0400
Subject: [PATCH 64/67] Minor improvement translog docs (#42184)

Closes #42183
---
 docs/reference/index-modules/translog.asciidoc | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc
index 705fb81b09c..6821e583a79 100644
--- a/docs/reference/index-modules/translog.asciidoc
+++ b/docs/reference/index-modules/translog.asciidoc
@@ -29,13 +29,11 @@ The data in the translog is only persisted to disk when the translog is
 ++fsync++ed and committed. In the event of hardware failure, any data written
 since the previous translog commit will be lost.

-By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds
-if `index.translog.durability` is set to `async` or if set to `request`
-(default) at the end of every <<docs-index_,index>>, <<docs-delete,delete>>,
-<<docs-update,update>>, or <<docs-bulk,bulk>> request. More precisely, if set
-to `request`, Elasticsearch will only report success of an index, delete,
+By default, `index.translog.durability` is set to `request`, meaning that Elasticsearch will only report success of an index, delete,
 update, or bulk request to the client after the translog has been successfully
-++fsync++ed and committed on the primary and on every allocated replica.
+++fsync++ed and committed on the primary and on every allocated replica. If
+`index.translog.durability` is set to `async`, then Elasticsearch ++fsync++s
+and commits the translog every `index.translog.sync_interval` (defaults to 5 seconds).

 The following <<indices-update-settings,dynamically updatable>> per-index settings
 control the behaviour of the translog:
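Editor's note: to make the rewritten paragraph concrete, here is a small, hypothetical illustration of the two durability modes as index settings; the values are examples only, and `Settings` is the standard Elasticsearch settings builder:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class TranslogSettingsExample {
    public static void main(String[] args) {
        // Default behaviour: fsync and commit the translog before
        // acknowledging each index, delete, update, or bulk request.
        Settings requestDurability = Settings.builder()
            .put("index.translog.durability", "request")
            .build();

        // Alternative: acknowledge immediately and fsync/commit in the
        // background every sync_interval (5s if unset), trading a window
        // of potential data loss for throughput.
        Settings asyncDurability = Settings.builder()
            .put("index.translog.durability", "async")
            .put("index.translog.sync_interval", "5s")
            .build();

        System.out.println(requestDurability + " / " + asyncDurability);
    }
}
--------------------------------------------------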
From 0ec7986049ee9e65cc5c630ae6ff1401cc08ae0c Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Sun, 19 May 2019 22:05:52 -0400
Subject: [PATCH 65/67] Enable debug log in testRetentionLeasesSyncOnRecovery

Relates #39105
---
 .../java/org/elasticsearch/index/seqno/RetentionLeaseIT.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java
index 92d31e305ad..cb40a0726d4 100644
--- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java
+++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java
@@ -356,7 +356,7 @@ public class RetentionLeaseIT extends ESIntegTestCase {
         assertFalse("retention leases background sync must be a noop if soft deletes is disabled", backgroundSyncRequestSent.get());
     }

-    @TestLogging(value = "org.elasticsearch.indices.recovery:trace")
+    @TestLogging(value = "org.elasticsearch.index:debug,org.elasticsearch.indices.recovery:trace")
     public void testRetentionLeasesSyncOnRecovery() throws Exception {
         final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2);
         internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas);

From b7599472aced11c47c510823ffadfb03050abe32 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Mon, 20 May 2019 04:02:01 -0400
Subject: [PATCH 66/67] Fix random failure in SearchRequestTests#testRandomVersionSerialization (#42069)

This commit fixes a test bug where the test ended up comparing the results
of two consecutive calls to System.currentTimeMillis, which can differ on
slow CIs.

Closes #42064
---
 .../org/elasticsearch/action/search/SearchRequest.java  | 9 ++++++++-
 .../elasticsearch/action/search/SearchRequestTests.java | 2 +-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java
index ea5c442f505..dea5283a629 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java
@@ -302,12 +302,19 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
      * ensure that the same value, determined by the coordinating node, is used on all nodes involved in the execution of the search
      * request. When created through {@link #crossClusterSearch(SearchRequest, String[], String, long, boolean)}, this method returns
      * the provided current time, otherwise it will return {@link System#currentTimeMillis()}.
-     *
      */
     long getOrCreateAbsoluteStartMillis() {
         return absoluteStartMillis == DEFAULT_ABSOLUTE_START_MILLIS ? System.currentTimeMillis() : absoluteStartMillis;
     }

+    /**
+     * Returns the provided absoluteStartMillis when created through {@link #crossClusterSearch} and
+     * -1 otherwise.
+     */
+    long getAbsoluteStartMillis() {
+        return absoluteStartMillis;
+    }
+
     /**
      * Sets the indices the search will be executed on.
      */
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
index 8aa35080f99..5aa0d937b98 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
@@ -94,7 +94,7 @@ public class SearchRequestTests extends AbstractSearchTestCase {
             assertTrue(deserializedRequest.isFinalReduce());
         } else {
             assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias());
-            assertEquals(searchRequest.getOrCreateAbsoluteStartMillis(), deserializedRequest.getOrCreateAbsoluteStartMillis());
+            assertEquals(searchRequest.getAbsoluteStartMillis(), deserializedRequest.getAbsoluteStartMillis());
             assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce());
         }
     }
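Editor's note: the flakiness fixed above is easy to reproduce in isolation. Two back-to-back reads of the millisecond clock are not guaranteed to return the same value, so any assertion comparing them can fail on a slow or heavily loaded CI worker. A self-contained illustration (not the actual test):

[source,java]
--------------------------------------------------
public class ClockComparisonExample {
    public static void main(String[] args) {
        // Each call re-reads the clock; a GC pause or scheduler hiccup
        // between them is enough to make the two values differ.
        long first = System.currentTimeMillis();
        long second = System.currentTimeMillis();
        // Equality holds *most* of the time, which is exactly what makes
        // tests written this way flaky rather than reliably red.
        if (first != second) {
            System.out.println("Clock moved between calls: " + first + " != " + second);
        }
    }
}
--------------------------------------------------

Hence the fix: expose the raw `absoluteStartMillis` through a getter that performs no clock read, and assert on that stable value instead.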
From 8f838198fa89841d177a07a9fe65ab89cbed1b0e Mon Sep 17 00:00:00 2001
From: Russ Cam
Date: Mon, 20 May 2019 06:07:28 -0400
Subject: [PATCH 67/67] Remove parent query string parameter (#41098)

This commit removes the deprecated parent query string parameter.
The routing parameter should be used instead.
---
 .../src/main/resources/rest-api-spec/api/create.json         | 4 ----
 .../src/main/resources/rest-api-spec/api/delete.json         | 4 ----
 .../src/main/resources/rest-api-spec/api/exists.json         | 4 ----
 .../src/main/resources/rest-api-spec/api/exists_source.json  | 4 ----
 .../src/main/resources/rest-api-spec/api/explain.json        | 4 ----
 rest-api-spec/src/main/resources/rest-api-spec/api/get.json  | 4 ----
 .../src/main/resources/rest-api-spec/api/get_source.json     | 4 ----
 .../src/main/resources/rest-api-spec/api/index.json          | 4 ----
 .../src/main/resources/rest-api-spec/api/mtermvectors.json   | 5 -----
 .../src/main/resources/rest-api-spec/api/termvectors.json    | 5 -----
 .../src/main/resources/rest-api-spec/api/update.json         | 4 ----
 11 files changed, 46 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json
index f21d2606364..65fcf02807b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json
@@ -33,10 +33,6 @@
         "type" : "string",
         "description" : "Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)"
       },
-      "parent": {
-        "type" : "string",
-        "description" : "ID of the parent document"
-      },
       "refresh": {
         "type" : "enum",
         "options": ["true", "false", "wait_for"],
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json
index 792f9d89609..01523740288 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json
@@ -33,10 +33,6 @@
         "type" : "string",
         "description" : "Sets the number of shard copies that must be active before proceeding with the delete operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)"
       },
-      "parent": {
-        "type" : "string",
-        "description" : "ID of parent document"
-      },
       "refresh": {
         "type" : "enum",
         "options": ["true", "false", "wait_for"],
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json
index 3debd3edce5..2a451344521 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json
@@ -33,10 +33,6 @@
         "type": "list",
         "description" : "A comma-separated list of stored fields to return in the response"
       },
-      "parent": {
-        "type" : "string",
-        "description" : "The ID of the parent document"
-      },
       "preference": {
         "type" : "string",
         "description" : "Specify the node or shard the operation should be performed on (default: random)"
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json
index 89f9c33e5fb..30e56141ec0 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json
@@ -30,10 +30,6 @@
       }
     },
     "params": {
-      "parent": {
-        "type" : "string",
-        "description" : "The ID of the parent document"
-      },
       "preference": {
         "type" : "string",
         "description" : "Specify the node or shard the operation should be performed on (default: random)"
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json
index 12aa7a8dca9..203ef23c9cc 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json
@@ -55,10 +55,6 @@
         "type" : "boolean",
         "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored"
       },
-      "parent": {
-        "type" : "string",
-        "description" : "The ID of the parent document"
-      },
       "preference": {
         "type" : "string",
         "description" : "Specify the node or shard the operation should be performed on (default: random)"
response" }, - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json index a26691edc41..d6f6964aa7c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json @@ -30,10 +30,6 @@ } }, "params": { - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 2a2053d2250..438032980a3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -43,10 +43,6 @@ "default" : "index", "description" : "Explicit operation type" }, - "parent": { - "type" : "string", - "description" : "ID of the parent document" - }, "refresh": { "type" : "enum", "options": ["true", "false", "wait_for"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json index 8cf4b22e90d..d41bfa3b1c1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json @@ -73,11 +73,6 @@ "description" : "Specific routing value. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", "required" : false }, - "parent" : { - "type" : "string", - "description" : "Parent id of documents. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "required" : false - }, "realtime": { "type": "boolean", "description": "Specifies if requests are real-time as opposed to near-real-time (default: true).", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json b/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json index 44b972b355f..ca85d4f7162 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json @@ -79,11 +79,6 @@ "description" : "Specific routing value.", "required" : false }, - "parent": { - "type" : "string", - "description" : "Parent id of documents.", - "required" : false - }, "realtime": { "type": "boolean", "description": "Specifies if request is real-time as opposed to near-real-time (default: true).", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index b85c70be57d..02435190674 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -49,10 +49,6 @@ "type": "string", "description": "The script language (default: painless)" }, - "parent": { - "type": "string", - "description": "ID of the parent document. Is is only used for routing and when for the upsert request" - }, "refresh": { "type" : "enum", "options": ["true", "false", "wait_for"],