From 826399f9fce7792b5a5fbb9456599144b4d6bd23 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Mon, 6 Aug 2018 11:48:50 +0200 Subject: [PATCH 01/16] Cross-cluster search: preserve cluster alias in shard failures (#32608) When some remote clusters return shard failures as part of a cross-cluster search request, the cluster alias currently gets lost. As a result, if the shard failures are all caused by the same error, and against indices belonging to different clusters, but with the same index name, only one failure gets returned as part of the search response, meaning that failures are grouped by index name, ignoring the cluster alias. With this commit we make sure that `ShardSearchFailure` returns the cluster alias as part of the index name. Also, we set the fully qualfied index name when creating a `QueryShardException`. That way shard failures are grouped by cluster:index. Such fixes should cover at least most of the cases where either 1) the shard target is set but we don't have the index in the cause (we were previously reading it only from the cause that did not have the cluster alias) 2) the shard target is missing but if the cause is a `QueryShardException` the cluster alias does not get lost. We also prevent NPE in case the failure cause is not set and test such scenario. --- .../org/elasticsearch/ExceptionsHelper.java | 59 ++++--- .../action/search/ShardSearchFailure.java | 13 +- .../index/query/QueryShardException.java | 2 +- .../search/SearchShardTarget.java | 11 +- .../elasticsearch/ExceptionsHelperTests.java | 115 ++++++++++++++ .../search/ShardSearchFailureTests.java | 21 ++- .../rest/action/RestActionsTests.java | 150 +++++++++++++++++- 7 files changed, 336 insertions(+), 35 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index dff14bc8b39..d4bac8066f1 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -39,6 +39,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.LinkedList; import java.util.List; +import java.util.Objects; import java.util.Optional; import java.util.Queue; import java.util.Set; @@ -278,7 +279,7 @@ public final class ExceptionsHelper { List uniqueFailures = new ArrayList<>(); Set reasons = new HashSet<>(); for (ShardOperationFailedException failure : failures) { - GroupBy reason = new GroupBy(failure.getCause()); + GroupBy reason = new GroupBy(failure); if (reasons.contains(reason) == false) { reasons.add(reason); uniqueFailures.add(failure); @@ -287,46 +288,52 @@ public final class ExceptionsHelper { return uniqueFailures.toArray(new ShardOperationFailedException[0]); } - static class GroupBy { + private static class GroupBy { final String reason; final String index; final Class causeType; - GroupBy(Throwable t) { - if (t instanceof ElasticsearchException) { - final Index index = ((ElasticsearchException) t).getIndex(); - if (index != null) { - this.index = index.getName(); - } else { - this.index = null; + GroupBy(ShardOperationFailedException failure) { + Throwable cause = failure.getCause(); + //the index name from the failure contains the cluster alias when using CCS. Ideally failures should be grouped by + //index name and cluster alias. That's why the failure index name has the precedence over the one coming from the cause, + //which does not include the cluster alias. 
+ String indexName = failure.index(); + if (indexName == null) { + if (cause instanceof ElasticsearchException) { + final Index index = ((ElasticsearchException) cause).getIndex(); + if (index != null) { + indexName = index.getName(); + } } - } else { - index = null; } - reason = t.getMessage(); - causeType = t.getClass(); + this.index = indexName; + if (cause == null) { + this.reason = failure.reason(); + this.causeType = null; + } else { + this.reason = cause.getMessage(); + this.causeType = cause.getClass(); + } } @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } GroupBy groupBy = (GroupBy) o; - - if (!causeType.equals(groupBy.causeType)) return false; - if (index != null ? !index.equals(groupBy.index) : groupBy.index != null) return false; - if (reason != null ? !reason.equals(groupBy.reason) : groupBy.reason != null) return false; - - return true; + return Objects.equals(reason, groupBy.reason) && + Objects.equals(index, groupBy.index) && + Objects.equals(causeType, groupBy.causeType); } @Override public int hashCode() { - int result = reason != null ? reason.hashCode() : 0; - result = 31 * result + (index != null ? index.hashCode() : 0); - result = 31 * result + causeType.hashCode(); - return result; + return Objects.hash(reason, index, causeType); } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index f2ba62fefd4..0a8fe3c6743 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchException; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.transport.RemoteClusterAware; import java.io.IOException; @@ -66,7 +67,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { public ShardSearchFailure(Exception e, @Nullable SearchShardTarget shardTarget) { final Throwable actual = ExceptionsHelper.unwrapCause(e); - if (actual != null && actual instanceof SearchException) { + if (actual instanceof SearchException) { this.shardTarget = ((SearchException) actual).shard(); } else if (shardTarget != null) { this.shardTarget = shardTarget; @@ -105,7 +106,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { @Override public String index() { if (shardTarget != null) { - return shardTarget.getIndex(); + return shardTarget.getFullyQualifiedIndexName(); } return null; } @@ -186,6 +187,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { String currentFieldName = null; int shardId = -1; String indexName = null; + String clusterAlias = null; String nodeId = null; ElasticsearchException exception = null; while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -196,6 +198,11 @@ public class ShardSearchFailure implements ShardOperationFailedException { shardId = parser.intValue(); } else if (INDEX_FIELD.equals(currentFieldName)) { indexName = parser.text(); + int indexOf = indexName.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR); + if (indexOf > 0) { + clusterAlias = indexName.substring(0, indexOf); + 
indexName = indexName.substring(indexOf + 1); + } } else if (NODE_FIELD.equals(currentFieldName)) { nodeId = parser.text(); } else { @@ -214,7 +221,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { SearchShardTarget searchShardTarget = null; if (nodeId != null) { searchShardTarget = new SearchShardTarget(nodeId, - new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), null, OriginalIndices.NONE); + new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), clusterAlias, OriginalIndices.NONE); } return new ShardSearchFailure(exception, searchShardTarget); } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryShardException.java b/server/src/main/java/org/elasticsearch/index/query/QueryShardException.java index 9b6ce3a6e4b..b52bc07ca78 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryShardException.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryShardException.java @@ -38,7 +38,7 @@ public class QueryShardException extends ElasticsearchException { public QueryShardException(QueryShardContext context, String msg, Throwable cause, Object... args) { super(msg, cause, args); - setIndex(context.index()); + setIndex(context.getFullyQualifiedIndexName()); } /** diff --git a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java index faf415b54ae..19c0f8c64d5 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -19,8 +19,6 @@ package org.elasticsearch.search; -import java.io.IOException; - import org.elasticsearch.Version; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Nullable; @@ -32,6 +30,8 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.transport.RemoteClusterAware; +import java.io.IOException; + /** * The target that the search request was executed on. 
*/ @@ -96,6 +96,13 @@ public final class SearchShardTarget implements Writeable, Comparable Date: Mon, 6 Aug 2018 14:56:21 +0200 Subject: [PATCH 02/16] LOGGING: Upgrade to Log4J 2.11.1 (#32616) * LOGGING: Upgrade to Log4J 2.11.1 * Upgrade to `2.11.1` to fix memory leaks in slow logger when logging large requests * This was caused by a bug in Log4J https://issues.apache.org/jira/browse/LOG4J2-2269 and is fixed in `2.11.1` via https://git-wip-us.apache.org/repos/asf?p=logging-log4j2.git;h=9496c0c * Fixes #32537 * Fixes #27300 --- buildSrc/version.properties | 2 +- docs/java-api/index.asciidoc | 4 ++-- plugins/repository-hdfs/build.gradle | 1 - .../licenses/log4j-slf4j-impl-2.11.1.jar.sha1 | 1 + .../licenses/log4j-slf4j-impl-2.9.1.jar.sha1 | 1 - server/build.gradle | 17 ++++++++++------- server/licenses/log4j-1.2-api-2.11.1.jar.sha1 | 1 + server/licenses/log4j-1.2-api-2.9.1.jar.sha1 | 1 - server/licenses/log4j-api-2.11.1.jar.sha1 | 1 + server/licenses/log4j-api-2.9.1.jar.sha1 | 1 - server/licenses/log4j-core-2.11.1.jar.sha1 | 1 + server/licenses/log4j-core-2.9.1.jar.sha1 | 1 - .../common/logging/LoggersTests.java | 2 +- test/logger-usage/build.gradle | 10 ++++++++++ .../licenses/log4j-slf4j-impl-2.11.1.jar.sha1 | 1 + .../licenses/log4j-slf4j-impl-2.9.1.jar.sha1 | 1 - x-pack/plugin/sql/sql-action/build.gradle | 18 ++++++++++-------- .../licenses/log4j-api-2.11.1.jar.sha1 | 1 + .../licenses/log4j-api-2.9.1.jar.sha1 | 1 - .../licenses/log4j-core-2.11.1.jar.sha1 | 1 + .../licenses/log4j-core-2.9.1.jar.sha1 | 1 - 21 files changed, 41 insertions(+), 27 deletions(-) create mode 100644 plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.11.1.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 create mode 100644 server/licenses/log4j-1.2-api-2.11.1.jar.sha1 delete mode 100644 server/licenses/log4j-1.2-api-2.9.1.jar.sha1 create mode 100644 server/licenses/log4j-api-2.11.1.jar.sha1 delete mode 100644 server/licenses/log4j-api-2.9.1.jar.sha1 create mode 100644 server/licenses/log4j-core-2.11.1.jar.sha1 delete mode 100644 server/licenses/log4j-core-2.9.1.jar.sha1 create mode 100644 x-pack/plugin/security/licenses/log4j-slf4j-impl-2.11.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-action/licenses/log4j-api-2.11.1.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-action/licenses/log4j-api-2.9.1.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-action/licenses/log4j-core-2.11.1.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-action/licenses/log4j-core-2.9.1.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 023d5d5b8dc..690bceb1e1a 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -7,7 +7,7 @@ jts = 1.15.0 jackson = 2.8.10 snakeyaml = 1.17 # when updating log4j, please update also docs/java-api/index.asciidoc -log4j = 2.9.1 +log4j = 2.11.1 slf4j = 1.6.2 # when updating the JNA version, also update the version in buildSrc/build.gradle diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc index 4fb7db4c4ab..5c3a94d57f4 100644 --- a/docs/java-api/index.asciidoc +++ b/docs/java-api/index.asciidoc @@ -81,7 +81,7 @@ You need to also include Log4j 2 dependencies: org.apache.logging.log4j log4j-core - 2.9.1 + 2.11.1 -------------------------------------------------- @@ -109,7 +109,7 @@ If you want to use another logger than Log4j 2, you can use http://www.slf4j.org org.apache.logging.log4j log4j-to-slf4j - 
2.9.1 + 2.11.1 org.slf4j diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 8856ae1526a..6debaf5282f 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -564,7 +564,6 @@ thirdPartyAudit.excludes = [ // we are not pulling in slf4j-ext, this is okay, Log4j will fallback gracefully 'org.slf4j.ext.EventData', - 'org.apache.log4j.AppenderSkeleton', 'org.apache.log4j.AsyncAppender', 'org.apache.log4j.helpers.ISO8601DateFormat', 'org.apache.log4j.spi.ThrowableInformation', diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.11.1.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.11.1.jar.sha1 new file mode 100644 index 00000000000..6178556b318 --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.11.1.jar.sha1 @@ -0,0 +1 @@ +4b41b53a3a2d299ce381a69d165381ca19f62912 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 deleted file mode 100644 index 66119e87e21..00000000000 --- a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a97a849b18b3798c4af1a2ca5b10c66cef17e3a \ No newline at end of file diff --git a/server/build.gradle b/server/build.gradle index deb38398979..1964eddd03e 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -220,7 +220,6 @@ thirdPartyAudit.excludes = [ 'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule', 'com.fasterxml.jackson.dataformat.xml.XmlMapper', 'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter', - 'com.fasterxml.jackson.databind.node.JsonNodeFactory', 'com.fasterxml.jackson.databind.node.ObjectNode', 'org.fusesource.jansi.Ansi', 'org.fusesource.jansi.AnsiRenderer$Code', @@ -262,12 +261,6 @@ thirdPartyAudit.excludes = [ 'javax.mail.internet.MimeMultipart', 'javax.mail.internet.MimeUtility', 'javax.mail.util.ByteArrayDataSource', - 'javax.persistence.AttributeConverter', - 'javax.persistence.EntityManager', - 'javax.persistence.EntityManagerFactory', - 'javax.persistence.EntityTransaction', - 'javax.persistence.Persistence', - 'javax.persistence.PersistenceException', 'org.apache.commons.compress.compressors.CompressorStreamFactory', 'org.apache.commons.compress.utils.IOUtils', 'org.apache.commons.csv.CSVFormat', @@ -311,6 +304,16 @@ thirdPartyAudit.excludes = [ 'com.google.common.geometry.S2LatLng', ] +if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { + // Used by Log4J 2.11.1 + thirdPartyAudit.excludes += [ + 'java.io.ObjectInputFilter', + 'java.io.ObjectInputFilter$Config', + 'java.io.ObjectInputFilter$FilterInfo', + 'java.io.ObjectInputFilter$Status' + ] +} + if (JavaVersion.current() > JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += ['javax.xml.bind.DatatypeConverter'] } diff --git a/server/licenses/log4j-1.2-api-2.11.1.jar.sha1 b/server/licenses/log4j-1.2-api-2.11.1.jar.sha1 new file mode 100644 index 00000000000..575d75dbda8 --- /dev/null +++ b/server/licenses/log4j-1.2-api-2.11.1.jar.sha1 @@ -0,0 +1 @@ +3aba3398fe064a3eab4331f88161c7480e848418 \ No newline at end of file diff --git a/server/licenses/log4j-1.2-api-2.9.1.jar.sha1 b/server/licenses/log4j-1.2-api-2.9.1.jar.sha1 deleted file mode 100644 index 0b5acc62b7a..00000000000 --- a/server/licenses/log4j-1.2-api-2.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -894f96d677880d4ab834a1356f62b875e579caaa \ No newline at end of file diff --git 
a/server/licenses/log4j-api-2.11.1.jar.sha1 b/server/licenses/log4j-api-2.11.1.jar.sha1 new file mode 100644 index 00000000000..4b1bfffac17 --- /dev/null +++ b/server/licenses/log4j-api-2.11.1.jar.sha1 @@ -0,0 +1 @@ +268f0fe4df3eefe052b57c87ec48517d64fb2a10 \ No newline at end of file diff --git a/server/licenses/log4j-api-2.9.1.jar.sha1 b/server/licenses/log4j-api-2.9.1.jar.sha1 deleted file mode 100644 index e1a89fadfed..00000000000 --- a/server/licenses/log4j-api-2.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a2999229464e7a324aa503c0a52ec0f05efe7bd \ No newline at end of file diff --git a/server/licenses/log4j-core-2.11.1.jar.sha1 b/server/licenses/log4j-core-2.11.1.jar.sha1 new file mode 100644 index 00000000000..2fb8589380a --- /dev/null +++ b/server/licenses/log4j-core-2.11.1.jar.sha1 @@ -0,0 +1 @@ +592a48674c926b01a9a747c7831bcd82a9e6d6e4 \ No newline at end of file diff --git a/server/licenses/log4j-core-2.9.1.jar.sha1 b/server/licenses/log4j-core-2.9.1.jar.sha1 deleted file mode 100644 index 990ea322a76..00000000000 --- a/server/licenses/log4j-core-2.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c041978c686866ee8534f538c6220238db3bb6be \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java index 6c18bd0afab..9b69a876c1d 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -46,7 +46,7 @@ public class LoggersTests extends ESTestCase { @Override public void append(LogEvent event) { - lastEvent = event; + lastEvent = event.toImmutable(); } ParameterizedMessage lastParameterizedMessage() { diff --git a/test/logger-usage/build.gradle b/test/logger-usage/build.gradle index 98fe76bfcdc..c16dab6a625 100644 --- a/test/logger-usage/build.gradle +++ b/test/logger-usage/build.gradle @@ -45,3 +45,13 @@ thirdPartyAudit.excludes = [ 'org.osgi.framework.wiring.BundleWire', 'org.osgi.framework.wiring.BundleWiring' ] + +if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { + // Used by Log4J 2.11.1 + thirdPartyAudit.excludes += [ + 'java.io.ObjectInputFilter', + 'java.io.ObjectInputFilter$Config', + 'java.io.ObjectInputFilter$FilterInfo', + 'java.io.ObjectInputFilter$Status' + ] +} \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.11.1.jar.sha1 b/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.11.1.jar.sha1 new file mode 100644 index 00000000000..6178556b318 --- /dev/null +++ b/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.11.1.jar.sha1 @@ -0,0 +1 @@ +4b41b53a3a2d299ce381a69d165381ca19f62912 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 b/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 deleted file mode 100644 index 66119e87e21..00000000000 --- a/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a97a849b18b3798c4af1a2ca5b10c66cef17e3a \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/build.gradle b/x-pack/plugin/sql/sql-action/build.gradle index f6b5177d508..bf79fd824ef 100644 --- a/x-pack/plugin/sql/sql-action/build.gradle +++ b/x-pack/plugin/sql/sql-action/build.gradle @@ -76,8 +76,6 @@ thirdPartyAudit.excludes = [ 'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule', 'com.fasterxml.jackson.dataformat.xml.XmlMapper', 
'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter', - 'com.fasterxml.jackson.databind.node.JsonNodeFactory', - 'com.fasterxml.jackson.databind.node.ObjectNode', 'com.lmax.disruptor.BlockingWaitStrategy', 'com.lmax.disruptor.BusySpinWaitStrategy', 'com.lmax.disruptor.EventFactory', @@ -116,12 +114,6 @@ thirdPartyAudit.excludes = [ 'javax.mail.internet.MimeMultipart', 'javax.mail.internet.MimeUtility', 'javax.mail.util.ByteArrayDataSource', - 'javax.persistence.AttributeConverter', - 'javax.persistence.EntityManager', - 'javax.persistence.EntityManagerFactory', - 'javax.persistence.EntityTransaction', - 'javax.persistence.Persistence', - 'javax.persistence.PersistenceException', 'org.apache.commons.compress.compressors.CompressorStreamFactory', 'org.apache.commons.compress.utils.IOUtils', 'org.apache.commons.csv.CSVFormat', @@ -150,3 +142,13 @@ thirdPartyAudit.excludes = [ 'org.zeromq.ZMQ$Socket', 'org.zeromq.ZMQ' ] + +if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { + // Used by Log4J 2.11.1 + thirdPartyAudit.excludes += [ + 'java.io.ObjectInputFilter', + 'java.io.ObjectInputFilter$Config', + 'java.io.ObjectInputFilter$FilterInfo', + 'java.io.ObjectInputFilter$Status' + ] +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/log4j-api-2.11.1.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/log4j-api-2.11.1.jar.sha1 new file mode 100644 index 00000000000..4b1bfffac17 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/log4j-api-2.11.1.jar.sha1 @@ -0,0 +1 @@ +268f0fe4df3eefe052b57c87ec48517d64fb2a10 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/log4j-api-2.9.1.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/log4j-api-2.9.1.jar.sha1 deleted file mode 100644 index e1a89fadfed..00000000000 --- a/x-pack/plugin/sql/sql-action/licenses/log4j-api-2.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a2999229464e7a324aa503c0a52ec0f05efe7bd \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/log4j-core-2.11.1.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/log4j-core-2.11.1.jar.sha1 new file mode 100644 index 00000000000..2fb8589380a --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/log4j-core-2.11.1.jar.sha1 @@ -0,0 +1 @@ +592a48674c926b01a9a747c7831bcd82a9e6d6e4 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/log4j-core-2.9.1.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/log4j-core-2.9.1.jar.sha1 deleted file mode 100644 index 990ea322a76..00000000000 --- a/x-pack/plugin/sql/sql-action/licenses/log4j-core-2.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c041978c686866ee8534f538c6220238db3bb6be \ No newline at end of file From 615aa85f4eb667649cb053c384d62ffb65e13f57 Mon Sep 17 00:00:00 2001 From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com> Date: Mon, 6 Aug 2018 23:51:43 +1000 Subject: [PATCH 03/16] [Kerberos] Use canonical host name (#32588) The Apache Http components support for Spnego scheme uses canonical name by default. Also when resolving host name, on centos by default there are other aliases so adding them to the DelegationPermission. 
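The difference is easy to see with plain JDK calls. A minimal sketch, assuming a host where the loopback address reverse-resolves to an alias such as localhost.localdomain (as it typically does on CentOS), so the printed values are illustrative only:

    import java.net.InetAddress;

    public class CanonicalPrincipalSketch {
        public static void main(String[] args) throws Exception {
            InetAddress resolved = InetAddress.getByName("127.0.0.1");
            // getHostName() usually returns the short alias, e.g. "localhost"
            String plain = "HTTP/" + resolved.getHostName();
            // getCanonicalHostName() performs a reverse lookup and may return
            // "localhost.localdomain" or another alias, depending on /etc/hosts
            String canonical = "HTTP/" + resolved.getCanonicalHostName();
            System.out.println(plain + " vs " + canonical);
        }
    }

Since the Spnego scheme derives the service principal from the canonical name, the keytab and the DelegationPermission grants need to cover each alias the canonical lookup can produce.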
Closes#32498 --- x-pack/qa/kerberos-tests/build.gradle | 2 +- .../security/authc/kerberos/KerberosAuthenticationIT.java | 2 +- .../kerberos-tests/src/test/resources/plugin-security.policy | 3 +++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index 7138b930512..59667d9ee78 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -41,7 +41,7 @@ Object httpPrincipal = new Object() { @Override String toString() { InetAddress resolvedAddress = InetAddress.getByName('127.0.0.1') - return "HTTP/" + resolvedAddress.getHostName() + return "HTTP/" + resolvedAddress.getCanonicalHostName() } } diff --git a/x-pack/qa/kerberos-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java b/x-pack/qa/kerberos-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java index ed9f4fbe38d..b6ebfde2079 100644 --- a/x-pack/qa/kerberos-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java +++ b/x-pack/qa/kerberos-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java @@ -112,7 +112,7 @@ public class KerberosAuthenticationIT extends ESRestTestCase { protected HttpHost buildHttpHost(String host, int port) { try { InetAddress inetAddress = InetAddress.getByName(host); - return super.buildHttpHost(inetAddress.getHostName(), port); + return super.buildHttpHost(inetAddress.getCanonicalHostName(), port); } catch (UnknownHostException e) { assumeNoException("failed to resolve host [" + host + "]", e); } diff --git a/x-pack/qa/kerberos-tests/src/test/resources/plugin-security.policy b/x-pack/qa/kerberos-tests/src/test/resources/plugin-security.policy index fb7936bf620..84219494bf2 100644 --- a/x-pack/qa/kerberos-tests/src/test/resources/plugin-security.policy +++ b/x-pack/qa/kerberos-tests/src/test/resources/plugin-security.policy @@ -1,4 +1,7 @@ grant { permission javax.security.auth.AuthPermission "doAsPrivileged"; permission javax.security.auth.kerberos.DelegationPermission "\"HTTP/localhost@BUILD.ELASTIC.CO\" \"krbtgt/BUILD.ELASTIC.CO@BUILD.ELASTIC.CO\""; + permission javax.security.auth.kerberos.DelegationPermission "\"HTTP/localhost.localdomain@BUILD.ELASTIC.CO\" \"krbtgt/BUILD.ELASTIC.CO@BUILD.ELASTIC.CO\""; + permission javax.security.auth.kerberos.DelegationPermission "\"HTTP/localhost4@BUILD.ELASTIC.CO\" \"krbtgt/BUILD.ELASTIC.CO@BUILD.ELASTIC.CO\""; + permission javax.security.auth.kerberos.DelegationPermission "\"HTTP/localhost4.localdomain4@BUILD.ELASTIC.CO\" \"krbtgt/BUILD.ELASTIC.CO@BUILD.ELASTIC.CO\""; }; \ No newline at end of file From e641fccfe3e7ce7ee9d41235cf29800d440e0d26 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Mon, 6 Aug 2018 07:15:40 -0700 Subject: [PATCH 04/16] Rest HL client: Add get license action (#32438) Rest HL client: Add get license action Continues to use String instead of a more complex License class to hold the license text similarly to put license. 
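The intended call pattern from the high-level REST client is sketched below; the host and port are placeholders for whatever cluster the client points at:

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestClient;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
    import org.elasticsearch.protocol.xpack.license.GetLicenseResponse;

    public class GetLicenseSketch {
        public static void main(String[] args) throws Exception {
            try (RestHighLevelClient client = new RestHighLevelClient(
                    RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
                GetLicenseRequest request = new GetLicenseRequest();
                // the response carries the license as a JSON string, not a parsed License object
                GetLicenseResponse response = client.license().getLicense(request, RequestOptions.DEFAULT);
                System.out.println(response.getLicenseDefinition());
            }
        }
    }

An asynchronous variant, getLicenseAsync, takes the same request plus an ActionListener<GetLicenseResponse>.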
Relates #29827 --- .../elasticsearch/client/LicenseClient.java | 71 ++++++++++++++++++- .../client/RequestConverters.java | 21 +++++- .../LicensingDocumentationIT.java | 62 ++++++++++++++++ .../high-level/licensing/get-license.asciidoc | 50 +++++++++++++ .../license/GetLicenseRequest.java | 28 -------- .../license/GetLicenseRequestBuilder.java | 1 + .../license/LicensingClient.java | 1 + .../license/RestGetLicenseAction.java | 1 + .../license/TransportGetLicenseAction.java | 1 + .../xpack/license/GetLicenseRequest.java | 41 +++++++++++ .../xpack/license/GetLicenseResponse.java | 38 ++++++++++ 11 files changed, 284 insertions(+), 31 deletions(-) create mode 100644 docs/java-rest/high-level/licensing/get-license.asciidoc delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequest.java create mode 100644 x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java create mode 100644 x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java index 587578f3b35..94bf6243835 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java @@ -19,11 +19,25 @@ package org.elasticsearch.client; +import org.apache.http.HttpEntity; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; +import org.elasticsearch.protocol.xpack.license.GetLicenseResponse; import org.elasticsearch.protocol.xpack.license.PutLicenseRequest; import org.elasticsearch.protocol.xpack.license.PutLicenseResponse; import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import static java.util.Collections.emptySet; @@ -54,7 +68,7 @@ public class LicenseClient { } /** - * Asynchronously updates license for the cluster cluster. + * Asynchronously updates license for the cluster. * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion */ @@ -63,4 +77,59 @@ public class LicenseClient { PutLicenseResponse::fromXContent, listener, emptySet()); } + /** + * Returns the current license for the cluster. + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetLicenseResponse getLicense(GetLicenseRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequest(request, RequestConverters::getLicense, options, + response -> new GetLicenseResponse(convertResponseToJson(response)), emptySet()); + } + + /** + * Asynchronously returns the current license for the cluster cluster. + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getLicenseAsync(GetLicenseRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsync(request, RequestConverters::getLicense, options, + response -> new GetLicenseResponse(convertResponseToJson(response)), listener, emptySet()); + } + + + /** + * Converts an entire response into a json sting + * + * This is useful for responses that we don't parse on the client side, but instead work as string + * such as in case of the license JSON + */ + static String convertResponseToJson(Response response) throws IOException { + HttpEntity entity = response.getEntity(); + if (entity == null) { + throw new IllegalStateException("Response body expected but not returned"); + } + if (entity.getContentType() == null) { + throw new IllegalStateException("Elasticsearch didn't return the [Content-Type] header, unable to parse response body"); + } + XContentType xContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue()); + if (xContentType == null) { + throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); + } + if (xContentType == XContentType.JSON) { + // No changes is required + return Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)); + } else { + // Need to convert into JSON + try (InputStream stream = response.getEntity().getContent(); + XContentParser parser = XContentFactory.xContent(xContentType).createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream)) { + parser.nextToken(); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.copyCurrentStructure(parser); + return Strings.toString(builder); + } + } + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index ce6fd1c8c94..b57ce017af4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -107,10 +107,11 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.rankeval.RankEvalRequest; import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; +import org.elasticsearch.protocol.xpack.license.PutLicenseRequest; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import org.elasticsearch.protocol.xpack.XPackUsageRequest; -import 
org.elasticsearch.protocol.xpack.license.PutLicenseRequest; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.mustache.MultiSearchTemplateRequest; import org.elasticsearch.script.mustache.SearchTemplateRequest; @@ -1154,7 +1155,11 @@ final class RequestConverters { } static Request putLicense(PutLicenseRequest putLicenseRequest) { - Request request = new Request(HttpPut.METHOD_NAME, "/_xpack/license"); + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("license") + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); Params parameters = new Params(request); parameters.withTimeout(putLicenseRequest.timeout()); parameters.withMasterTimeout(putLicenseRequest.masterNodeTimeout()); @@ -1165,6 +1170,18 @@ final class RequestConverters { return request; } + + static Request getLicense(GetLicenseRequest getLicenseRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("license") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + Params parameters = new Params(request); + parameters.withLocal(getLicenseRequest.local()); + return request; + } + private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef(); return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/LicensingDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/LicensingDocumentationIT.java index dc8ea568446..7173d1eb336 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/LicensingDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/LicensingDocumentationIT.java @@ -25,6 +25,8 @@ import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; +import org.elasticsearch.protocol.xpack.license.GetLicenseResponse; import org.elasticsearch.protocol.xpack.license.LicensesStatus; import org.elasticsearch.protocol.xpack.license.PutLicenseRequest; import org.elasticsearch.protocol.xpack.license.PutLicenseResponse; @@ -33,6 +35,8 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; @@ -105,4 +109,62 @@ public class LicensingDocumentationIT extends ESRestHighLevelClientTestCase { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testGetLicense() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + //tag::get-license-execute + GetLicenseRequest request = new GetLicenseRequest(); + + GetLicenseResponse response = client.license().getLicense(request, RequestOptions.DEFAULT); + //end::get-license-execute + + //tag::get-license-response + String currentLicense = response.getLicenseDefinition(); // <1> + //end::get-license-response + + 
assertThat(currentLicense, containsString("trial")); + assertThat(currentLicense, containsString("client_rest-high-level_integTestCluster")); + } + { + GetLicenseRequest request = new GetLicenseRequest(); + // tag::get-license-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetLicenseResponse indexResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::get-license-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::get-license-execute-async + client.license().getLicenseAsync( + request, RequestOptions.DEFAULT, listener); // <1> + // end::get-license-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + { + GetLicenseRequest request = new GetLicenseRequest(); + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + // Make sure that it still works in other formats + builder.addHeader("Accept", randomFrom("application/smile", "application/cbor")); + RequestOptions options = builder.build(); + GetLicenseResponse response = client.license().getLicense(request, options); + String currentLicense = response.getLicenseDefinition(); + assertThat(currentLicense, startsWith("{")); + assertThat(currentLicense, containsString("trial")); + assertThat(currentLicense, containsString("client_rest-high-level_integTestCluster")); + assertThat(currentLicense, endsWith("}")); + } + } } diff --git a/docs/java-rest/high-level/licensing/get-license.asciidoc b/docs/java-rest/high-level/licensing/get-license.asciidoc new file mode 100644 index 00000000000..17eb89450fb --- /dev/null +++ b/docs/java-rest/high-level/licensing/get-license.asciidoc @@ -0,0 +1,50 @@ +[[java-rest-high-get-license]] +=== Get License + +[[java-rest-high-get-license-execution]] +==== Execution + +The license can be added or updated using the `getLicense()` method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/LicensingDocumentationIT.java[get-license-execute] +-------------------------------------------------- + +[[java-rest-high-get-license-response]] +==== Response + +The returned `GetLicenseResponse` contains the license in the JSON format. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/LicensingDocumentationIT.java[get-license-response] +-------------------------------------------------- +<1> The text of the license. + +[[java-rest-high-get-license-async]] +==== Asynchronous Execution + +This request can be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/LicensingDocumentationIT.java[get-license-execute-async] +-------------------------------------------------- +<1> The `GetLicenseRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. 
+ +A typical listener for `GetLicenseResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/LicensingDocumentationIT.java[get-license-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequest.java deleted file mode 100644 index 914e18772af..00000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequest.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.license; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.master.MasterNodeReadRequest; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; - - -public class GetLicenseRequest extends MasterNodeReadRequest { - - public GetLicenseRequest() { - } - - public GetLicenseRequest(StreamInput in) throws IOException { - super(in); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequestBuilder.java index 7e92a54bce2..ed72e674687 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequestBuilder.java @@ -7,6 +7,7 @@ package org.elasticsearch.license; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; public class GetLicenseRequestBuilder extends MasterNodeReadOperationRequestBuilder { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java index 14a059e9e01..9adfba64119 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java @@ -7,6 +7,7 @@ package org.elasticsearch.license; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; import org.elasticsearch.protocol.xpack.license.PutLicenseResponse; public class LicensingClient { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java index 31004823e49..2d72bc3bed5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java @@ -9,6 +9,7 @@ import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java index ba6da84f19b..de55a664277 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java new file mode 100644 index 00000000000..c669d3d3377 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + + +public class GetLicenseRequest extends MasterNodeReadRequest { + + public GetLicenseRequest() { + } + + public GetLicenseRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java new file mode 100644 index 00000000000..7232e185a7e --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionResponse; + +public class GetLicenseResponse extends ActionResponse { + + private String license; + + GetLicenseResponse() { + } + + public GetLicenseResponse(String license) { + this.license = license; + } + + public String getLicenseDefinition() { + return license; + } + +} From 014b2772db003635bcb971df40e53d81e1549062 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Mon, 6 Aug 2018 18:35:18 +0200 Subject: [PATCH 05/16] [TEST] Fix testReplicaTermIncrementWithConcurrentPrimaryPromotion The assertion in the test was not broad enough. If the timing is very unlucky, the shard is already promoted to primary before the indexOnReplica even gets to execute. Closes #32645 --- .../index/replication/IndexLevelReplicationTests.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 1e2c00e5896..f38550d7041 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -67,6 +67,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; @@ -261,7 +262,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase barrier.await(); indexOnReplica(replicationRequest, shards, replica2, newReplica1Term); } catch (IllegalStateException ise) { - assertThat(ise.getMessage(), containsString("is too old")); + assertThat(ise.getMessage(), either(containsString("is too old")).or(containsString("cannot be a replication target"))); } catch (Exception e) { throw new RuntimeException(e); } @@ -303,7 +304,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase indexOnReplica(replicationRequest, shards, replica, primaryPrimaryTerm); successFullyIndexed.set(true); } catch (IllegalStateException ise) { - assertThat(ise.getMessage(), containsString("is too old")); + assertThat(ise.getMessage(), either(containsString("is too old")).or(containsString("cannot be a replication target"))); } catch (Exception e) { throw new RuntimeException(e); } From e01e4393a8f867d4a23b8a3d6f999e542439759d Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Mon, 6 Aug 2018 12:00:07 -0700 Subject: 
[PATCH 06/16] [Docs] Light edit to info about docker images (#32376) --- docs/reference/setup/install/docker.asciidoc | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 523217b921a..e2e5c6ab70b 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -4,9 +4,9 @@ {es} is also available as Docker images. The images use https://hub.docker.com/_/centos/[centos:7] as the base image. -A list of all published Docker images and tags can be found in -https://www.docker.elastic.co[www.docker.elastic.co]. The source code can be found -on https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub]. +A list of all published Docker images and tags is available at +https://www.docker.elastic.co[www.docker.elastic.co]. The source code is in +https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub]. These images are free to use under the Elastic license. They contain open source and free commercial features and access to paid commercial features. @@ -29,15 +29,13 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -For example, the Docker image can be retrieved with the following command: - ["source","sh",subs="attributes"] -------------------------------------------- docker pull {docker-repo}:{version} -------------------------------------------- Alternatively, you can download other Docker images that contain only features -that are available under the Apache 2.0 license from +available under the Apache 2.0 license. To download the images, go to https://www.docker.elastic.co[www.docker.elastic.co]. endif::[] From b2a0f38a0c9f0fd9127e63e22a94f253b7b6d9e5 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 6 Aug 2018 15:33:18 -0500 Subject: [PATCH 07/16] Adding xpack.core.ml.datafeed to protocol.xpack.ml.datafeed (#32625) * Adding org.elasticsearch.xpack.core.ml.datafeed to org.elasticsearch.protocol.xpack.ml.datafeed * removing unused ParseField and import * Addressing PR feed back and fixing tests * Simplifying Datafeed(Config|Update) ctor parser --- .../xpack/ml/datafeed/ChunkingConfig.java | 134 +++++++ .../xpack/ml/datafeed/DatafeedConfig.java | 329 ++++++++++++++++++ .../xpack/ml/datafeed/DatafeedUpdate.java | 310 +++++++++++++++++ .../ml/datafeed/ChunkingConfigTests.java | 59 ++++ .../ml/datafeed/DatafeedConfigTests.java | 177 ++++++++++ .../ml/datafeed/DatafeedUpdateTests.java | 101 ++++++ 6 files changed, 1110 insertions(+) create mode 100644 x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfig.java create mode 100644 x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfig.java create mode 100644 x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdate.java create mode 100644 x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfigTests.java create mode 100644 x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfigTests.java create mode 100644 x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdateTests.java diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfig.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfig.java new file mode 100644 index 00000000000..0b9d9f12046 --- /dev/null +++ 
b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfig.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.ml.datafeed; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +/** + * The description of how searches should be chunked. + */ +public class ChunkingConfig implements ToXContentObject { + + public static final ParseField MODE_FIELD = new ParseField("mode"); + public static final ParseField TIME_SPAN_FIELD = new ParseField("time_span"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "chunking_config", true, a -> new ChunkingConfig((Mode) a[0], (TimeValue) a[1])); + + static { + PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Mode.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, MODE_FIELD, ValueType.STRING); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return TimeValue.parseTimeValue(p.text(), TIME_SPAN_FIELD.getPreferredName()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, TIME_SPAN_FIELD, ValueType.STRING); + + } + + private final Mode mode; + private final TimeValue timeSpan; + + + ChunkingConfig(Mode mode, @Nullable TimeValue timeSpan) { + this.mode = Objects.requireNonNull(mode, MODE_FIELD.getPreferredName()); + this.timeSpan = timeSpan; + } + + @Nullable + public TimeValue getTimeSpan() { + return timeSpan; + } + + Mode getMode() { + return mode; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MODE_FIELD.getPreferredName(), mode); + if (timeSpan != null) { + builder.field(TIME_SPAN_FIELD.getPreferredName(), timeSpan.getStringRep()); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(mode, timeSpan); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + 
return false; + } + + ChunkingConfig other = (ChunkingConfig) obj; + return Objects.equals(this.mode, other.mode) && + Objects.equals(this.timeSpan, other.timeSpan); + } + + public static ChunkingConfig newAuto() { + return new ChunkingConfig(Mode.AUTO, null); + } + + public static ChunkingConfig newOff() { + return new ChunkingConfig(Mode.OFF, null); + } + + public static ChunkingConfig newManual(TimeValue timeSpan) { + return new ChunkingConfig(Mode.MANUAL, timeSpan); + } + + public enum Mode { + AUTO, MANUAL, OFF; + + public static Mode fromString(String value) { + return Mode.valueOf(value.toUpperCase(Locale.ROOT)); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfig.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfig.java new file mode 100644 index 00000000000..85b7a0acea6 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfig.java @@ -0,0 +1,329 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.ml.datafeed; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.builder.SearchSourceBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; + +/** + * Datafeed configuration options pojo. Describes where to proactively pull input + * data from. + *

+ * If a value has not been set it will be null. Object wrappers are + * used around integral types and booleans so they can take null + * values. + */ +public class DatafeedConfig implements ToXContentObject { + + public static final int DEFAULT_SCROLL_SIZE = 1000; + + public static final ParseField ID = new ParseField("datafeed_id"); + public static final ParseField QUERY_DELAY = new ParseField("query_delay"); + public static final ParseField FREQUENCY = new ParseField("frequency"); + public static final ParseField INDEXES = new ParseField("indexes"); + public static final ParseField INDICES = new ParseField("indices"); + public static final ParseField JOB_ID = new ParseField("job_id"); + public static final ParseField TYPES = new ParseField("types"); + public static final ParseField QUERY = new ParseField("query"); + public static final ParseField SCROLL_SIZE = new ParseField("scroll_size"); + public static final ParseField AGGREGATIONS = new ParseField("aggregations"); + public static final ParseField AGGS = new ParseField("aggs"); + public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields"); + public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "datafeed_config", true, a -> new Builder((String)a[0], (String)a[1])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), JOB_ID); + + PARSER.declareStringArray(Builder::setIndices, INDEXES); + PARSER.declareStringArray(Builder::setIndices, INDICES); + PARSER.declareStringArray(Builder::setTypes, TYPES); + PARSER.declareString((builder, val) -> + builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY); + PARSER.declareString((builder, val) -> + builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY); + PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY); + PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS); + PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS); + PARSER.declareObject(Builder::setScriptFields, (p, c) -> { + List parsedScriptFields = new ArrayList<>(); + while (p.nextToken() != XContentParser.Token.END_OBJECT) { + parsedScriptFields.add(new SearchSourceBuilder.ScriptField(p)); + } + return parsedScriptFields; + }, SCRIPT_FIELDS); + PARSER.declareInt(Builder::setScrollSize, SCROLL_SIZE); + PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG); + } + + private final String id; + private final String jobId; + + /** + * The delay before starting to query a period of time + */ + private final TimeValue queryDelay; + + /** + * The frequency with which queries are executed + */ + private final TimeValue frequency; + + private final List indices; + private final List types; + private final QueryBuilder query; + private final AggregatorFactories.Builder aggregations; + private final List scriptFields; + private final Integer scrollSize; + private final ChunkingConfig chunkingConfig; + + private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, + QueryBuilder query, AggregatorFactories.Builder aggregations, List scriptFields, + Integer scrollSize, ChunkingConfig 
chunkingConfig) { + this.id = id; + this.jobId = jobId; + this.queryDelay = queryDelay; + this.frequency = frequency; + this.indices = indices == null ? null : Collections.unmodifiableList(indices); + this.types = types == null ? null : Collections.unmodifiableList(types); + this.query = query; + this.aggregations = aggregations; + this.scriptFields = scriptFields == null ? null : Collections.unmodifiableList(scriptFields); + this.scrollSize = scrollSize; + this.chunkingConfig = chunkingConfig; + } + + public String getId() { + return id; + } + + public String getJobId() { + return jobId; + } + + public TimeValue getQueryDelay() { + return queryDelay; + } + + public TimeValue getFrequency() { + return frequency; + } + + public List getIndices() { + return indices; + } + + public List getTypes() { + return types; + } + + public Integer getScrollSize() { + return scrollSize; + } + + public QueryBuilder getQuery() { + return query; + } + + public AggregatorFactories.Builder getAggregations() { + return aggregations; + } + + public List getScriptFields() { + return scriptFields == null ? Collections.emptyList() : scriptFields; + } + + public ChunkingConfig getChunkingConfig() { + return chunkingConfig; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID.getPreferredName(), id); + builder.field(JOB_ID.getPreferredName(), jobId); + if (queryDelay != null) { + builder.field(QUERY_DELAY.getPreferredName(), queryDelay.getStringRep()); + } + if (frequency != null) { + builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); + } + builder.field(INDICES.getPreferredName(), indices); + builder.field(TYPES.getPreferredName(), types); + builder.field(QUERY.getPreferredName(), query); + if (aggregations != null) { + builder.field(AGGREGATIONS.getPreferredName(), aggregations); + } + if (scriptFields != null) { + builder.startObject(SCRIPT_FIELDS.getPreferredName()); + for (SearchSourceBuilder.ScriptField scriptField : scriptFields) { + scriptField.toXContent(builder, params); + } + builder.endObject(); + } + builder.field(SCROLL_SIZE.getPreferredName(), scrollSize); + if (chunkingConfig != null) { + builder.field(CHUNKING_CONFIG.getPreferredName(), chunkingConfig); + } + + builder.endObject(); + return builder; + } + + /** + * The lists of indices and types are compared for equality but they are not + * sorted first so this test could fail simply because the indices and types + * lists are in different orders. 
+ */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DatafeedConfig that = (DatafeedConfig) other; + + return Objects.equals(this.id, that.id) + && Objects.equals(this.jobId, that.jobId) + && Objects.equals(this.frequency, that.frequency) + && Objects.equals(this.queryDelay, that.queryDelay) + && Objects.equals(this.indices, that.indices) + && Objects.equals(this.types, that.types) + && Objects.equals(this.query, that.query) + && Objects.equals(this.scrollSize, that.scrollSize) + && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(this.scriptFields, that.scriptFields) + && Objects.equals(this.chunkingConfig, that.chunkingConfig); + } + + @Override + public int hashCode() { + return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields, + chunkingConfig); + } + + public static class Builder { + + private String id; + private String jobId; + private TimeValue queryDelay; + private TimeValue frequency; + private List indices = Collections.emptyList(); + private List types = Collections.emptyList(); + private QueryBuilder query = QueryBuilders.matchAllQuery(); + private AggregatorFactories.Builder aggregations; + private List scriptFields; + private Integer scrollSize = DEFAULT_SCROLL_SIZE; + private ChunkingConfig chunkingConfig; + + public Builder(String id, String jobId) { + this.id = Objects.requireNonNull(id, ID.getPreferredName()); + this.jobId = Objects.requireNonNull(jobId, JOB_ID.getPreferredName()); + } + + public Builder(DatafeedConfig config) { + this.id = config.id; + this.jobId = config.jobId; + this.queryDelay = config.queryDelay; + this.frequency = config.frequency; + this.indices = config.indices; + this.types = config.types; + this.query = config.query; + this.aggregations = config.aggregations; + this.scriptFields = config.scriptFields; + this.scrollSize = config.scrollSize; + this.chunkingConfig = config.chunkingConfig; + } + + public Builder setIndices(List indices) { + this.indices = Objects.requireNonNull(indices, INDICES.getPreferredName()); + return this; + } + + public Builder setTypes(List types) { + this.types = Objects.requireNonNull(types, TYPES.getPreferredName()); + return this; + } + + public Builder setQueryDelay(TimeValue queryDelay) { + this.queryDelay = queryDelay; + return this; + } + + public Builder setFrequency(TimeValue frequency) { + this.frequency = frequency; + return this; + } + + public Builder setQuery(QueryBuilder query) { + this.query = Objects.requireNonNull(query, QUERY.getPreferredName()); + return this; + } + + public Builder setAggregations(AggregatorFactories.Builder aggregations) { + this.aggregations = aggregations; + return this; + } + + public Builder setScriptFields(List scriptFields) { + List sorted = new ArrayList<>(scriptFields); + sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName)); + this.scriptFields = sorted; + return this; + } + + public Builder setScrollSize(int scrollSize) { + this.scrollSize = scrollSize; + return this; + } + + public Builder setChunkingConfig(ChunkingConfig chunkingConfig) { + this.chunkingConfig = chunkingConfig; + return this; + } + + public DatafeedConfig build() { + return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, + chunkingConfig); + } + } +} diff --git 
a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdate.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdate.java new file mode 100644 index 00000000000..6afcdf1d2d8 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdate.java @@ -0,0 +1,310 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.ml.datafeed; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.builder.SearchSourceBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; + +/** + * A datafeed update contains partial properties to update a {@link DatafeedConfig}. + * The main difference between this class and {@link DatafeedConfig} is that here all + * fields are nullable. 
+ */ +public class DatafeedUpdate implements ToXContentObject { + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "datafeed_update", true, a -> new Builder((String)a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), DatafeedConfig.ID); + + PARSER.declareString(Builder::setJobId, DatafeedConfig.JOB_ID); + PARSER.declareStringArray(Builder::setIndices, DatafeedConfig.INDEXES); + PARSER.declareStringArray(Builder::setIndices, DatafeedConfig.INDICES); + PARSER.declareStringArray(Builder::setTypes, DatafeedConfig.TYPES); + PARSER.declareString((builder, val) -> builder.setQueryDelay( + TimeValue.parseTimeValue(val, DatafeedConfig.QUERY_DELAY.getPreferredName())), DatafeedConfig.QUERY_DELAY); + PARSER.declareString((builder, val) -> builder.setFrequency( + TimeValue.parseTimeValue(val, DatafeedConfig.FREQUENCY.getPreferredName())), DatafeedConfig.FREQUENCY); + PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), DatafeedConfig.QUERY); + PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), + DatafeedConfig.AGGREGATIONS); + PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), + DatafeedConfig.AGGS); + PARSER.declareObject(Builder::setScriptFields, (p, c) -> { + List parsedScriptFields = new ArrayList<>(); + while (p.nextToken() != XContentParser.Token.END_OBJECT) { + parsedScriptFields.add(new SearchSourceBuilder.ScriptField(p)); + } + return parsedScriptFields; + }, DatafeedConfig.SCRIPT_FIELDS); + PARSER.declareInt(Builder::setScrollSize, DatafeedConfig.SCROLL_SIZE); + PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, DatafeedConfig.CHUNKING_CONFIG); + } + + private final String id; + private final String jobId; + private final TimeValue queryDelay; + private final TimeValue frequency; + private final List indices; + private final List types; + private final QueryBuilder query; + private final AggregatorFactories.Builder aggregations; + private final List scriptFields; + private final Integer scrollSize; + private final ChunkingConfig chunkingConfig; + + private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, + QueryBuilder query, AggregatorFactories.Builder aggregations, List scriptFields, + Integer scrollSize, ChunkingConfig chunkingConfig) { + this.id = id; + this.jobId = jobId; + this.queryDelay = queryDelay; + this.frequency = frequency; + this.indices = indices; + this.types = types; + this.query = query; + this.aggregations = aggregations; + this.scriptFields = scriptFields; + this.scrollSize = scrollSize; + this.chunkingConfig = chunkingConfig; + } + + /** + * Get the id of the datafeed to update + */ + public String getId() { + return id; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DatafeedConfig.ID.getPreferredName(), id); + addOptionalField(builder, DatafeedConfig.JOB_ID, jobId); + if (queryDelay != null) { + builder.field(DatafeedConfig.QUERY_DELAY.getPreferredName(), queryDelay.getStringRep()); + } + if (frequency != null) { + builder.field(DatafeedConfig.FREQUENCY.getPreferredName(), frequency.getStringRep()); + } + addOptionalField(builder, DatafeedConfig.INDICES, indices); + addOptionalField(builder, DatafeedConfig.TYPES, types); + addOptionalField(builder, DatafeedConfig.QUERY, 
query); + addOptionalField(builder, DatafeedConfig.AGGREGATIONS, aggregations); + if (scriptFields != null) { + builder.startObject(DatafeedConfig.SCRIPT_FIELDS.getPreferredName()); + for (SearchSourceBuilder.ScriptField scriptField : scriptFields) { + scriptField.toXContent(builder, params); + } + builder.endObject(); + } + addOptionalField(builder, DatafeedConfig.SCROLL_SIZE, scrollSize); + addOptionalField(builder, DatafeedConfig.CHUNKING_CONFIG, chunkingConfig); + builder.endObject(); + return builder; + } + + private void addOptionalField(XContentBuilder builder, ParseField field, Object value) throws IOException { + if (value != null) { + builder.field(field.getPreferredName(), value); + } + } + + public String getJobId() { + return jobId; + } + + public TimeValue getQueryDelay() { + return queryDelay; + } + + public TimeValue getFrequency() { + return frequency; + } + + public List getIndices() { + return indices; + } + + public List getTypes() { + return types; + } + + public Integer getScrollSize() { + return scrollSize; + } + + public QueryBuilder getQuery() { + return query; + } + + public AggregatorFactories.Builder getAggregations() { + return aggregations; + } + + public List getScriptFields() { + return scriptFields == null ? Collections.emptyList() : scriptFields; + } + + public ChunkingConfig getChunkingConfig() { + return chunkingConfig; + } + + /** + * The lists of indices and types are compared for equality but they are not + * sorted first so this test could fail simply because the indices and types + * lists are in different orders. + */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DatafeedUpdate that = (DatafeedUpdate) other; + + return Objects.equals(this.id, that.id) + && Objects.equals(this.jobId, that.jobId) + && Objects.equals(this.frequency, that.frequency) + && Objects.equals(this.queryDelay, that.queryDelay) + && Objects.equals(this.indices, that.indices) + && Objects.equals(this.types, that.types) + && Objects.equals(this.query, that.query) + && Objects.equals(this.scrollSize, that.scrollSize) + && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(this.scriptFields, that.scriptFields) + && Objects.equals(this.chunkingConfig, that.chunkingConfig); + } + + @Override + public int hashCode() { + return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields, + chunkingConfig); + } + + public static class Builder { + + private String id; + private String jobId; + private TimeValue queryDelay; + private TimeValue frequency; + private List indices; + private List types; + private QueryBuilder query; + private AggregatorFactories.Builder aggregations; + private List scriptFields; + private Integer scrollSize; + private ChunkingConfig chunkingConfig; + + public Builder(String id) { + this.id = Objects.requireNonNull(id, DatafeedConfig.ID.getPreferredName()); + } + + public Builder(DatafeedUpdate config) { + this.id = config.id; + this.jobId = config.jobId; + this.queryDelay = config.queryDelay; + this.frequency = config.frequency; + this.indices = config.indices; + this.types = config.types; + this.query = config.query; + this.aggregations = config.aggregations; + this.scriptFields = config.scriptFields; + this.scrollSize = config.scrollSize; + this.chunkingConfig = config.chunkingConfig; + } + + public Builder setJobId(String jobId) { + this.jobId = jobId; + 
return this; + } + + public Builder setIndices(List indices) { + this.indices = indices; + return this; + } + + public Builder setTypes(List types) { + this.types = types; + return this; + } + + public Builder setQueryDelay(TimeValue queryDelay) { + this.queryDelay = queryDelay; + return this; + } + + public Builder setFrequency(TimeValue frequency) { + this.frequency = frequency; + return this; + } + + public Builder setQuery(QueryBuilder query) { + this.query = query; + return this; + } + + public Builder setAggregations(AggregatorFactories.Builder aggregations) { + this.aggregations = aggregations; + return this; + } + + public Builder setScriptFields(List scriptFields) { + List sorted = new ArrayList<>(scriptFields); + sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName)); + this.scriptFields = sorted; + return this; + } + + public Builder setScrollSize(int scrollSize) { + this.scrollSize = scrollSize; + return this; + } + + public Builder setChunkingConfig(ChunkingConfig chunkingConfig) { + this.chunkingConfig = chunkingConfig; + return this; + } + + public DatafeedUpdate build() { + return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, + chunkingConfig); + } + } +} diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfigTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfigTests.java new file mode 100644 index 00000000000..c835788bb1c --- /dev/null +++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfigTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.ml.datafeed; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.util.Arrays; +import java.util.concurrent.TimeUnit; + +public class ChunkingConfigTests extends AbstractXContentTestCase { + + @Override + protected ChunkingConfig createTestInstance() { + return createRandomizedChunk(); + } + + @Override + protected ChunkingConfig doParseInstance(XContentParser parser) { + return ChunkingConfig.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + public static ChunkingConfig createRandomizedChunk() { + ChunkingConfig.Mode mode = randomFrom(ChunkingConfig.Mode.values()); + TimeValue timeSpan = null; + if (mode == ChunkingConfig.Mode.MANUAL) { + // time span is required to be at least 1 millis, so we use a custom method to generate a time value here + timeSpan = randomPositiveSecondsMinutesHours(); + } + return new ChunkingConfig(mode, timeSpan); + } + + private static TimeValue randomPositiveSecondsMinutesHours() { + return new TimeValue(randomIntBetween(1, 1000), randomFrom(Arrays.asList(TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS))); + } + +} diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfigTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfigTests.java new file mode 100644 index 00000000000..f45d88d318e --- /dev/null +++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfigTests.java @@ -0,0 +1,177 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.ml.datafeed; + +import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class DatafeedConfigTests extends AbstractXContentTestCase { + + @Override + protected DatafeedConfig createTestInstance() { + long bucketSpanMillis = 3600000; + DatafeedConfig.Builder builder = constructBuilder(); + builder.setIndices(randomStringList(1, 10)); + builder.setTypes(randomStringList(0, 10)); + if (randomBoolean()) { + builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + } + boolean addScriptFields = randomBoolean(); + if (addScriptFields) { + int scriptsSize = randomInt(3); + List scriptFields = new ArrayList<>(scriptsSize); + for (int scriptIndex = 0; scriptIndex < scriptsSize; scriptIndex++) { + scriptFields.add(new ScriptField(randomAlphaOfLength(10), mockScript(randomAlphaOfLength(10)), + randomBoolean())); + } + builder.setScriptFields(scriptFields); + } + Long aggHistogramInterval = null; + if (randomBoolean()) { + // can only test with a single agg as the xcontent order gets randomized by test base class and then + // the actual xcontent isn't the same and test fail. + // Testing with a single agg is ok as we don't have special list xcontent logic + AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); + aggHistogramInterval = randomNonNegativeLong(); + aggHistogramInterval = aggHistogramInterval > bucketSpanMillis ? bucketSpanMillis : aggHistogramInterval; + aggHistogramInterval = aggHistogramInterval <= 0 ? 
1 : aggHistogramInterval; + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + aggs.addAggregator(AggregationBuilders.dateHistogram("buckets") + .interval(aggHistogramInterval).subAggregation(maxTime).field("time")); + builder.setAggregations(aggs); + } + if (randomBoolean()) { + builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + if (aggHistogramInterval == null) { + builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000))); + } else { + builder.setFrequency(TimeValue.timeValueMillis(randomIntBetween(1, 5) * aggHistogramInterval)); + } + } + if (randomBoolean()) { + builder.setQueryDelay(TimeValue.timeValueMillis(randomIntBetween(1, 1_000_000))); + } + if (randomBoolean()) { + builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk()); + } + return builder.build(); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + + public static List randomStringList(int min, int max) { + int size = scaledRandomIntBetween(min, max); + List list = new ArrayList<>(); + for (int i = 0; i < size; i++) { + list.add(randomAlphaOfLength(10)); + } + return list; + } + + @Override + protected DatafeedConfig doParseInstance(XContentParser parser) { + return DatafeedConfig.PARSER.apply(parser, null).build(); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + private static final String FUTURE_DATAFEED = "{\n" + + " \"datafeed_id\": \"farequote-datafeed\",\n" + + " \"job_id\": \"farequote\",\n" + + " \"frequency\": \"1h\",\n" + + " \"indices\": [\"farequote1\", \"farequote2\"],\n" + + " \"tomorrows_technology_today\": \"amazing\",\n" + + " \"scroll_size\": 1234\n" + + "}"; + + public void testFutureMetadataParse() throws IOException { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED); + // Unlike the config version of this test, the metadata parser should tolerate the unknown future field + assertNotNull(DatafeedConfig.PARSER.apply(parser, null).build()); + } + + public void testCopyConstructor() { + for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { + DatafeedConfig datafeedConfig = createTestInstance(); + DatafeedConfig copy = new DatafeedConfig.Builder(datafeedConfig).build(); + assertEquals(datafeedConfig, copy); + } + } + + public void testCheckValid_GivenNullIdInConstruction() { + expectThrows(NullPointerException.class, () -> new DatafeedConfig.Builder(null, null)); + } + + public void testCheckValid_GivenNullJobId() { + expectThrows(NullPointerException.class, () -> new DatafeedConfig.Builder(randomValidDatafeedId(), null)); + } + + public void testCheckValid_GivenNullIndices() { + DatafeedConfig.Builder conf = constructBuilder(); + expectThrows(NullPointerException.class, () -> conf.setIndices(null)); + } + + public void testCheckValid_GivenNullType() { + DatafeedConfig.Builder conf = constructBuilder(); + expectThrows(NullPointerException.class, () -> conf.setTypes(null)); + } + + public void testCheckValid_GivenNullQuery() { + DatafeedConfig.Builder conf = constructBuilder(); + expectThrows(NullPointerException.class, () -> conf.setQuery(null)); + } + + public static String randomValidDatafeedId() { + CodepointSetGenerator generator = new 
CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()); + return generator.ofCodePointsLength(random(), 10, 10); + } + + private static DatafeedConfig.Builder constructBuilder() { + return new DatafeedConfig.Builder(randomValidDatafeedId(), randomAlphaOfLength(10)); + } + +} diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdateTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdateTests.java new file mode 100644 index 00000000000..edbef8461e0 --- /dev/null +++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdateTests.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.ml.datafeed; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class DatafeedUpdateTests extends AbstractXContentTestCase { + + @Override + protected DatafeedUpdate createTestInstance() { + DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder(DatafeedConfigTests.randomValidDatafeedId()); + if (randomBoolean()) { + builder.setJobId(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + builder.setQueryDelay(TimeValue.timeValueMillis(randomIntBetween(1, Integer.MAX_VALUE))); + } + if (randomBoolean()) { + builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, Integer.MAX_VALUE))); + } + if (randomBoolean()) { + builder.setIndices(DatafeedConfigTests.randomStringList(1, 10)); + } + if (randomBoolean()) { + builder.setTypes(DatafeedConfigTests.randomStringList(1, 10)); + } + if (randomBoolean()) { + builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + } + if (randomBoolean()) { + int scriptsSize = randomInt(3); + List scriptFields = new ArrayList<>(scriptsSize); + for (int scriptIndex = 0; scriptIndex < scriptsSize; scriptIndex++) { + scriptFields.add(new SearchSourceBuilder.ScriptField(randomAlphaOfLength(10), mockScript(randomAlphaOfLength(10)), + randomBoolean())); + } + builder.setScriptFields(scriptFields); + } + if (randomBoolean()) { + // can only test with a single agg as the xcontent order gets 
randomized by test base class and then + // the actual xcontent isn't the same and test fail. + // Testing with a single agg is ok as we don't have special list xcontent logic + AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); + aggs.addAggregator(AggregationBuilders.avg(randomAlphaOfLength(10)).field(randomAlphaOfLength(10))); + builder.setAggregations(aggs); + } + if (randomBoolean()) { + builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk()); + } + return builder.build(); + } + + @Override + protected DatafeedUpdate doParseInstance(XContentParser parser) { + return DatafeedUpdate.PARSER.apply(parser, null).build(); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + +} From 4dda5a990bbf78c73569cbb27c8a47afcadcae27 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 6 Aug 2018 22:46:26 +0200 Subject: [PATCH 08/16] INGEST: Fix ThreadWatchDog Throwing on Shutdown (#32578) * INGEST: Fix ThreadWatchDog Throwing on Shutdown * #32539 is caused by the fact that ThreadWatchDog.Default could throw on shutdown if the ThreadPool is interrupted while `interruptLongRunningExecutions` is in progress. This is a result of the watchdog not having a lifecycle of its own (normally it terminates when the threadpool terminates). * We can't easily use `org.elasticsearch.common.util.concurrent.EsRejectedExecutionException#isExecutorShutdown` to catch this state the same way other components do since that would require adding the core lib to Grok as a dependency * Since we have no knowledge of the lifecycle in this component (we're only passed the scheduler `BiFunction`), I fixed this by only scheduling the watchdog when there are actually registered threads in it. * I think using the pattern of locking via two `Atomic*` values should not be much of a performance concern here under load since either the integer will likely be > 0 in this case (because we have multiple Grok in parallel) or the running state will be true because there likely was at least one thread registered when the watchdog ran and so the enqueuing of the watchdog task during `register` will happen very rarely here (in the worst case scenario of only a single Grok thread it will happen less frequently than once every `ingest.grok.watchdog.interval`). The atomic update on the count should not be relevant relative to the cost of adding a new node to the CHM either. * Fixes #32539 * Also fixes the watchdog not to run if it doesn't have to in general. 
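As a plain illustration of the two-`Atomic*` scheduling pattern described above (a minimal sketch only, not the committed code, which follows in the diff below; the class and method names here are made up, and the scheduler is assumed to be the same kind of `BiFunction<Long, Runnable, ScheduledFuture<?>>` the watchdog is constructed with):

import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiFunction;

class IdleAwarePeriodicTask {

    private final long interval;
    private final BiFunction<Long, Runnable, ScheduledFuture<?>> scheduler;
    private final AtomicInteger registered = new AtomicInteger(0);
    private final AtomicBoolean running = new AtomicBoolean(false);

    IdleAwarePeriodicTask(long interval, BiFunction<Long, Runnable, ScheduledFuture<?>> scheduler) {
        this.interval = interval;
        this.scheduler = scheduler;
        // nothing is scheduled up front; the periodic task only starts once the first thread registers
    }

    void register() {
        registered.incrementAndGet();
        // only the caller that flips running from false to true schedules the task,
        // so at most one periodic task is ever in flight
        if (running.compareAndSet(false, true)) {
            scheduler.apply(interval, this::tick);
        }
    }

    void unregister() {
        registered.decrementAndGet();
    }

    private void tick() {
        // ... periodic work (e.g. interrupting long-running executions) would go here ...
        if (registered.get() > 0) {
            scheduler.apply(interval, this::tick); // keep rescheduling while threads are registered
        } else {
            running.set(false); // go idle; the next register() call restarts the loop
        }
    }
}

The `compareAndSet` on the boolean keeps the periodic task from being enqueued twice, while the counter lets the last run after the final `unregister()` park the loop instead of rescheduling forever.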
--- .../elasticsearch/grok/ThreadWatchdog.java | 16 +++++- .../grok/ThreadWatchdogTests.java | 54 +++++++++++++++++-- 2 files changed, 63 insertions(+), 7 deletions(-) diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/ThreadWatchdog.java b/libs/grok/src/main/java/org/elasticsearch/grok/ThreadWatchdog.java index d0de7637d2c..f3515fcfe83 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/ThreadWatchdog.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/ThreadWatchdog.java @@ -21,6 +21,8 @@ package org.elasticsearch.grok; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.LongSupplier; @@ -104,6 +106,8 @@ public interface ThreadWatchdog { private final long maxExecutionTime; private final LongSupplier relativeTimeSupplier; private final BiFunction> scheduler; + private final AtomicInteger registered = new AtomicInteger(0); + private final AtomicBoolean running = new AtomicBoolean(false); final ConcurrentHashMap registry = new ConcurrentHashMap<>(); private Default(long interval, @@ -114,11 +118,14 @@ public interface ThreadWatchdog { this.maxExecutionTime = maxExecutionTime; this.relativeTimeSupplier = relativeTimeSupplier; this.scheduler = scheduler; - scheduler.apply(interval, this::interruptLongRunningExecutions); } public void register() { + registered.getAndIncrement(); Long previousValue = registry.put(Thread.currentThread(), relativeTimeSupplier.getAsLong()); + if (running.compareAndSet(false, true) == true) { + scheduler.apply(interval, this::interruptLongRunningExecutions); + } assert previousValue == null; } @@ -129,6 +136,7 @@ public interface ThreadWatchdog { public void unregister() { Long previousValue = registry.remove(Thread.currentThread()); + registered.decrementAndGet(); assert previousValue != null; } @@ -140,7 +148,11 @@ public interface ThreadWatchdog { // not removing the entry here, this happens in the unregister() method. 
} } - scheduler.apply(interval, this::interruptLongRunningExecutions); + if (registered.get() > 0) { + scheduler.apply(interval, this::interruptLongRunningExecutions); + } else { + running.set(false); + } } } diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/ThreadWatchdogTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/ThreadWatchdogTests.java index 46faa4ae05d..29e2351215f 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/ThreadWatchdogTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/ThreadWatchdogTests.java @@ -18,15 +18,25 @@ */ package org.elasticsearch.grok; -import org.elasticsearch.test.ESTestCase; - import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import org.elasticsearch.test.ESTestCase; +import org.mockito.Mockito; import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; public class ThreadWatchdogTests extends ESTestCase { - + public void testInterrupt() throws Exception { AtomicBoolean run = new AtomicBoolean(true); // to avoid a lingering thread when test has completed ThreadWatchdog watchdog = ThreadWatchdog.newInstance(10, 100, System::currentTimeMillis, (delay, command) -> { @@ -43,7 +53,7 @@ public class ThreadWatchdogTests extends ESTestCase { thread.start(); return null; }); - + Map registry = ((ThreadWatchdog.Default) watchdog).registry; assertThat(registry.size(), is(0)); // need to call #register() method on a different thread, assertBusy() fails if current thread gets interrupted @@ -66,5 +76,39 @@ public class ThreadWatchdogTests extends ESTestCase { assertThat(registry.size(), is(0)); }); } - + + public void testIdleIfNothingRegistered() throws Exception { + long interval = 1L; + ScheduledExecutorService threadPool = mock(ScheduledExecutorService.class); + ThreadWatchdog watchdog = ThreadWatchdog.newInstance(interval, Long.MAX_VALUE, System::currentTimeMillis, + (delay, command) -> threadPool.schedule(command, delay, TimeUnit.MILLISECONDS)); + // Periodic action is not scheduled because no thread is registered + verifyZeroInteractions(threadPool); + CompletableFuture commandFuture = new CompletableFuture<>(); + // Periodic action is scheduled because a thread is registered + doAnswer(invocationOnMock -> { + commandFuture.complete((Runnable) invocationOnMock.getArguments()[0]); + return null; + }).when(threadPool).schedule( + any(Runnable.class), eq(interval), eq(TimeUnit.MILLISECONDS) + ); + watchdog.register(); + // Registering the first thread should have caused the command to get scheduled again + Runnable command = commandFuture.get(1L, TimeUnit.MILLISECONDS); + Mockito.reset(threadPool); + watchdog.unregister(); + command.run(); + // Periodic action is not scheduled again because no thread is registered + verifyZeroInteractions(threadPool); + watchdog.register(); + Thread otherThread = new Thread(watchdog::register); + try { + verify(threadPool).schedule(any(Runnable.class), eq(interval), eq(TimeUnit.MILLISECONDS)); + // Registering a second thread does not cause the command to get scheduled twice + verifyNoMoreInteractions(threadPool); + 
otherThread.start(); + } finally { + otherThread.join(); + } + } } From b46e13629f4c02e5abbfcad16175b91a3e2d8279 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 6 Aug 2018 14:43:53 -0700 Subject: [PATCH 09/16] Docs: Allow snippets to have line continuation (#32649) Currently, snippets in lists cannot be rendered correctly as a console command because the console command requires a line continuation '+'. This allows snippets to have a line continuation between the snippet and the // CONSOLE. --- .../groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy index ec012633f08..8c0eedeb6f5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy @@ -284,6 +284,10 @@ public class SnippetsTask extends DefaultTask { contents.append(line).append('\n') return } + // Allow line continuations for console snippets within lists + if (snippet != null && line.trim() == '+') { + return + } // Just finished emit() } From 3fb09231826f4f5c63760123e77a587ee63fa9fb Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 6 Aug 2018 18:07:46 -0400 Subject: [PATCH 10/16] Fix content type detection with leading whitespace (#32632) Today content type detection on an input stream works by peeking up to twenty bytes into the stream. If the stream is headed by more whitespace than twenty bytes, we might fail to detect the content type. We should be ignoring this whitespace before attempting to detect the content type. This commit does that by ignoring all leading whitespace in an input stream before attempting to guess the content type. --- .../common/xcontent/XContentFactory.java | 37 +++++++++++++++---- .../common/xcontent/XContentFactoryTests.java | 18 ++++++++- 2 files changed, 45 insertions(+), 10 deletions(-) diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java index fb871590df7..38bc251be41 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java @@ -35,7 +35,7 @@ import java.io.OutputStream; */ public class XContentFactory { - private static final int GUESS_HEADER_LENGTH = 20; + static final int GUESS_HEADER_LENGTH = 20; /** * Returns a content builder using JSON format ({@link org.elasticsearch.common.xcontent.XContentType#JSON}. @@ -153,8 +153,10 @@ public class XContentFactory { return XContentType.JSON; } // Should we throw a failure here? Smile idea is to use it in bytes.... - if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 && - content.charAt(2) == SmileConstants.HEADER_BYTE_3) { + if (length > 2 + && first == SmileConstants.HEADER_BYTE_1 + && content.charAt(1) == SmileConstants.HEADER_BYTE_2 + && content.charAt(2) == SmileConstants.HEADER_BYTE_3) { return XContentType.SMILE; } if (length > 2 && first == '-' && content.charAt(1) == '-' && content.charAt(2) == '-') { @@ -227,13 +229,29 @@ public class XContentFactory { */ @Deprecated public static XContentType xContentType(InputStream si) throws IOException { + /* + * We need to guess the content type. 
To do this, we look for the first non-whitespace character and then try to guess the content + * type on the GUESS_HEADER_LENGTH bytes that follow. We do this in a way that does not modify the initial read position in the + * underlying input stream. This is why the input stream must support mark/reset and why we repeatedly mark the read position and + * reset. + */ if (si.markSupported() == false) { throw new IllegalArgumentException("Cannot guess the xcontent type without mark/reset support on " + si.getClass()); } - si.mark(GUESS_HEADER_LENGTH); + si.mark(Integer.MAX_VALUE); try { + // scan until we find the first non-whitespace character or the end of the stream + int current; + do { + current = si.read(); + if (current == -1) { + return null; + } + } while (Character.isWhitespace((char) current)); + // now guess the content type off the next GUESS_HEADER_LENGTH bytes including the current byte final byte[] firstBytes = new byte[GUESS_HEADER_LENGTH]; - int read = 0; + firstBytes[0] = (byte) current; + int read = 1; while (read < GUESS_HEADER_LENGTH) { final int r = si.read(firstBytes, read, GUESS_HEADER_LENGTH - read); if (r == -1) { @@ -245,6 +263,7 @@ public class XContentFactory { } finally { si.reset(); } + } /** @@ -278,15 +297,17 @@ public class XContentFactory { if (first == '{') { return XContentType.JSON; } - if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 && - bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) { + if (length > 2 + && first == SmileConstants.HEADER_BYTE_1 + && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 + && bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) { return XContentType.SMILE; } if (length > 2 && first == '-' && bytes[offset + 1] == '-' && bytes[offset + 2] == '-') { return XContentType.YAML; } // CBOR logic similar to CBORFactory#hasCBORFormat - if (first == CBORConstants.BYTE_OBJECT_INDEFINITE && length > 1){ + if (first == CBORConstants.BYTE_OBJECT_INDEFINITE && length > 1) { return XContentType.CBOR; } if (CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_TAG, first) && length > 2) { diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java index a893fb63ec8..1a0d0dead6e 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayInputStream; import java.io.IOException; +import java.util.Arrays; import static org.hamcrest.Matchers.equalTo; @@ -54,8 +55,21 @@ public class XContentFactoryTests extends ESTestCase { builder.field("field1", "value1"); builder.endObject(); - assertThat(XContentHelper.xContentType(BytesReference.bytes(builder)), equalTo(type)); - assertThat(XContentFactory.xContentType(BytesReference.bytes(builder).streamInput()), equalTo(type)); + final BytesReference bytes; + if (type == XContentType.JSON && randomBoolean()) { + final int length = randomIntBetween(0, 8 * XContentFactory.GUESS_HEADER_LENGTH); + final String content = Strings.toString(builder); + final StringBuilder sb = new StringBuilder(length + content.length()); + final char[] chars = new char[length]; + Arrays.fill(chars, ' '); + sb.append(new String(chars)).append(content); + bytes = new BytesArray(sb.toString()); + } else { + bytes = BytesReference.bytes(builder); + } + + 
assertThat(XContentHelper.xContentType(bytes), equalTo(type)); + assertThat(XContentFactory.xContentType(bytes.streamInput()), equalTo(type)); // CBOR is binary, cannot use String if (type != XContentType.CBOR && type != XContentType.SMILE) { From 1122314b3bdd803f72ef93f6930522995a381eba Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Tue, 7 Aug 2018 09:39:24 +0200 Subject: [PATCH 11/16] [Rollup] Remove builders from GroupConfig (#32614) --- .../core/rollup/action/RollupJobCaps.java | 10 +- .../rollup/job/DateHistogramGroupConfig.java | 2 +- .../xpack/core/rollup/job/GroupConfig.java | 168 ++++++++---------- .../core/rollup/job/HistogramGroupConfig.java | 2 +- .../core/rollup/job/RollupJobConfig.java | 2 +- .../core/rollup/job/TermsGroupConfig.java | 2 +- .../xpack/core/rollup/ConfigTestHelpers.java | 17 +- .../job/GroupConfigSerializingTests.java | 10 +- .../xpack/rollup/job/IndexerUtils.java | 6 +- .../xpack/rollup/job/RollupIndexer.java | 14 +- .../rollup/RollupJobIdentifierUtilTests.java | 159 +++++++---------- .../rollup/action/SearchActionTests.java | 87 ++++----- .../xpack/rollup/config/ConfigTests.java | 20 +-- .../xpack/rollup/job/IndexerUtilsTests.java | 53 +++--- .../job/RollupIndexerIndexingTests.java | 23 +-- .../rollup/job/RollupIndexerStateTests.java | 2 +- 16 files changed, 246 insertions(+), 331 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java index c8874ae459d..1b8eb736084 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java @@ -42,8 +42,8 @@ public class RollupJobCaps implements Writeable, ToXContentObject { jobID = job.getId(); rollupIndex = job.getRollupIndex(); indexPattern = job.getIndexPattern(); - Map dateHistoAggCap = job.getGroupConfig().getDateHisto().toAggCap(); - String dateField = job.getGroupConfig().getDateHisto().getField(); + Map dateHistoAggCap = job.getGroupConfig().getDateHistogram().toAggCap(); + String dateField = job.getGroupConfig().getDateHistogram().getField(); RollupFieldCaps fieldCaps = fieldCapLookup.get(dateField); if (fieldCaps == null) { fieldCaps = new RollupFieldCaps(); @@ -51,9 +51,9 @@ public class RollupJobCaps implements Writeable, ToXContentObject { fieldCaps.addAgg(dateHistoAggCap); fieldCapLookup.put(dateField, fieldCaps); - if (job.getGroupConfig().getHisto() != null) { - Map histoAggCap = job.getGroupConfig().getHisto().toAggCap(); - Arrays.stream(job.getGroupConfig().getHisto().getFields()).forEach(field -> { + if (job.getGroupConfig().getHistogram() != null) { + Map histoAggCap = job.getGroupConfig().getHistogram().toAggCap(); + Arrays.stream(job.getGroupConfig().getHistogram().getFields()).forEach(field -> { RollupFieldCaps caps = fieldCapLookup.get(field); if (caps == null) { caps = new RollupFieldCaps(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java index add60403a98..77dfa1cbbb1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java @@ -54,7 +54,7 @@ import static 
org.elasticsearch.common.xcontent.ObjectParser.ValueType; */ public class DateHistogramGroupConfig implements Writeable, ToXContentObject { - private static final String NAME = "date_histogram"; + static final String NAME = "date_histogram"; private static final String INTERVAL = "interval"; private static final String FIELD = "field"; public static final String TIME_ZONE = "time_zone"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java index 59e7d1127e1..f7685f4e614 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java @@ -13,17 +13,21 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Collections; import java.util.HashSet; import java.util.Map; import java.util.Objects; import java.util.Set; import static java.util.Arrays.asList; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * The configuration object for the groups section in the rollup config. 
@@ -38,64 +42,85 @@ import static java.util.Arrays.asList; * } */ public class GroupConfig implements Writeable, ToXContentObject { - private static final String NAME = "grouping_config"; - private static final ParseField DATE_HISTO = new ParseField("date_histogram"); - private static final ParseField HISTO = new ParseField("histogram"); - private static final ParseField TERMS = new ParseField("terms"); - - private final DateHistogramGroupConfig dateHisto; - private final HistogramGroupConfig histo; - private final TermsGroupConfig terms; - - public static final ObjectParser PARSER = new ObjectParser<>(NAME, GroupConfig.Builder::new); + public static final String NAME = "groups"; + private static final ConstructingObjectParser PARSER; static { - PARSER.declareObject(GroupConfig.Builder::setDateHisto, (p,c) -> DateHistogramGroupConfig.fromXContent(p), DATE_HISTO); - PARSER.declareObject(GroupConfig.Builder::setHisto, (p,c) -> HistogramGroupConfig.fromXContent(p), HISTO); - PARSER.declareObject(GroupConfig.Builder::setTerms, (p,c) -> TermsGroupConfig.fromXContent(p), TERMS); + PARSER = new ConstructingObjectParser<>(NAME, args -> + new GroupConfig((DateHistogramGroupConfig) args[0], (HistogramGroupConfig) args[1], (TermsGroupConfig) args[2])); + PARSER.declareObject(constructorArg(), + (p, c) -> DateHistogramGroupConfig.fromXContent(p), new ParseField(DateHistogramGroupConfig.NAME)); + PARSER.declareObject(optionalConstructorArg(), + (p, c) -> HistogramGroupConfig.fromXContent(p), new ParseField(HistogramGroupConfig.NAME)); + PARSER.declareObject(optionalConstructorArg(), + (p, c) -> TermsGroupConfig.fromXContent(p), new ParseField(TermsGroupConfig.NAME)); } - private GroupConfig(DateHistogramGroupConfig dateHisto, @Nullable HistogramGroupConfig histo, @Nullable TermsGroupConfig terms) { - this.dateHisto = Objects.requireNonNull(dateHisto, "A date_histogram group is mandatory"); - this.histo = histo; + private final DateHistogramGroupConfig dateHistogram; + private final @Nullable HistogramGroupConfig histogram; + private final @Nullable TermsGroupConfig terms; + + public GroupConfig(final DateHistogramGroupConfig dateHistogram) { + this(dateHistogram, null, null); + } + + public GroupConfig(final DateHistogramGroupConfig dateHistogram, + final @Nullable HistogramGroupConfig histogram, + final @Nullable TermsGroupConfig terms) { + if (dateHistogram == null) { + throw new IllegalArgumentException("Date histogram must not be null"); + } + this.dateHistogram = dateHistogram; + this.histogram = histogram; this.terms = terms; } - GroupConfig(StreamInput in) throws IOException { - dateHisto = new DateHistogramGroupConfig(in); - histo = in.readOptionalWriteable(HistogramGroupConfig::new); + GroupConfig(final StreamInput in) throws IOException { + dateHistogram = new DateHistogramGroupConfig(in); + histogram = in.readOptionalWriteable(HistogramGroupConfig::new); terms = in.readOptionalWriteable(TermsGroupConfig::new); } - public DateHistogramGroupConfig getDateHisto() { - return dateHisto; + /** + * @return the configuration of the date histogram + */ + public DateHistogramGroupConfig getDateHistogram() { + return dateHistogram; } - public HistogramGroupConfig getHisto() { - return histo; + /** + * @return the configuration of the histogram + */ + @Nullable + public HistogramGroupConfig getHistogram() { + return histogram; } + /** + * @return the configuration of the terms + */ + @Nullable public TermsGroupConfig getTerms() { return terms; } public Set getAllFields() { Set fields = new HashSet<>(); 
- fields.add(dateHisto.getField()); - if (histo != null) { - fields.addAll(asList(histo.getFields())); + fields.add(dateHistogram.getField()); + if (histogram != null) { + fields.addAll(asList(histogram.getFields())); } if (terms != null) { fields.addAll(asList(terms.getFields())); } - return fields; + return Collections.unmodifiableSet(fields); } - public void validateMappings(Map> fieldCapsResponse, - ActionRequestValidationException validationException) { - dateHisto.validateMappings(fieldCapsResponse, validationException); - if (histo != null) { - histo.validateMappings(fieldCapsResponse, validationException); + public void validateMappings(final Map> fieldCapsResponse, + final ActionRequestValidationException validationException) { + dateHistogram.validateMappings(fieldCapsResponse, validationException); + if (histogram != null) { + histogram.validateMappings(fieldCapsResponse, validationException); } if (terms != null) { terms.validateMappings(fieldCapsResponse, validationException); @@ -105,44 +130,43 @@ public class GroupConfig implements Writeable, ToXContentObject { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(DATE_HISTO.getPreferredName(), dateHisto); - if (histo != null) { - builder.field(HISTO.getPreferredName(), histo); + { + builder.field(DateHistogramGroupConfig.NAME, dateHistogram); + if (histogram != null) { + builder.field(HistogramGroupConfig.NAME, histogram); + } + if (terms != null) { + builder.field(TermsGroupConfig.NAME, terms); + } } - if (terms != null) { - builder.field(TERMS.getPreferredName(), terms); - } - builder.endObject(); - return builder; + return builder.endObject(); } @Override - public void writeTo(StreamOutput out) throws IOException { - dateHisto.writeTo(out); - out.writeOptionalWriteable(histo); + public void writeTo(final StreamOutput out) throws IOException { + dateHistogram.writeTo(out); + out.writeOptionalWriteable(histogram); out.writeOptionalWriteable(terms); } @Override - public boolean equals(Object other) { + public boolean equals(final Object other) { if (this == other) { return true; } - if (other == null || getClass() != other.getClass()) { return false; } - GroupConfig that = (GroupConfig) other; - - return Objects.equals(this.dateHisto, that.dateHisto) - && Objects.equals(this.histo, that.histo) - && Objects.equals(this.terms, that.terms); + final GroupConfig that = (GroupConfig) other; + return Objects.equals(dateHistogram, that.dateHistogram) + && Objects.equals(histogram, that.histogram) + && Objects.equals(terms, that.terms); } @Override public int hashCode() { - return Objects.hash(dateHisto, histo, terms); + return Objects.hash(dateHistogram, histogram, terms); } @Override @@ -150,43 +174,7 @@ public class GroupConfig implements Writeable, ToXContentObject { return Strings.toString(this, true, true); } - public static class Builder { - private DateHistogramGroupConfig dateHisto; - private HistogramGroupConfig histo; - private TermsGroupConfig terms; - - public DateHistogramGroupConfig getDateHisto() { - return dateHisto; - } - - public GroupConfig.Builder setDateHisto(DateHistogramGroupConfig dateHisto) { - this.dateHisto = dateHisto; - return this; - } - - public HistogramGroupConfig getHisto() { - return histo; - } - - public GroupConfig.Builder setHisto(HistogramGroupConfig histo) { - this.histo = histo; - return this; - } - - public TermsGroupConfig getTerms() { - return terms; - } - - public GroupConfig.Builder 
setTerms(TermsGroupConfig terms) { - this.terms = terms; - return this; - } - - public GroupConfig build() { - if (dateHisto == null) { - throw new IllegalArgumentException("A date_histogram group is mandatory"); - } - return new GroupConfig(dateHisto, histo, terms); - } + public static GroupConfig fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java index 4f67978e4bc..0480050bf52 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java @@ -46,7 +46,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru */ public class HistogramGroupConfig implements Writeable, ToXContentObject { - public static final String NAME = "histogram"; + static final String NAME = "histogram"; private static final String INTERVAL = "interval"; private static final String FIELDS = "fields"; private static final ConstructingObjectParser PARSER; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java index 1abec72ef53..b876aa251cc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java @@ -62,7 +62,7 @@ public class RollupJobConfig implements NamedWriteable, ToXContentObject { static { PARSER.declareString(RollupJobConfig.Builder::setId, RollupField.ID); - PARSER.declareObject(RollupJobConfig.Builder::setGroupConfig, (p, c) -> GroupConfig.PARSER.apply(p,c).build(), GROUPS); + PARSER.declareObject(RollupJobConfig.Builder::setGroupConfig, (p, c) -> GroupConfig.fromXContent(p), GROUPS); PARSER.declareObjectArray(RollupJobConfig.Builder::setMetricsConfig, (p, c) -> MetricConfig.fromXContent(p), METRICS); PARSER.declareString((params, val) -> params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java index a1b0b3118ec..32507d57f32 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java @@ -45,7 +45,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru */ public class TermsGroupConfig implements Writeable, ToXContentObject { - private static final String NAME = "terms"; + static final String NAME = "terms"; private static final String FIELDS = "fields"; private static final List FLOAT_TYPES = Arrays.asList("half_float", "float", "double", "scaled_float"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java index 16ce6158b35..6713fc75032 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java @@ -37,7 +37,7 @@ public class ConfigTestHelpers { String indexPattern = ESTestCase.randomAlphaOfLengthBetween(1,10); builder.setIndexPattern(indexPattern); builder.setRollupIndex("rollup_" + indexPattern); // to ensure the index pattern != rollup index - builder.setGroupConfig(ConfigTestHelpers.getGroupConfig().build()); + builder.setGroupConfig(ConfigTestHelpers.randomGroupConfig(ESTestCase.random())); builder.setPageSize(ESTestCase.randomIntBetween(1,10)); if (ESTestCase.randomBoolean()) { builder.setMetricsConfig(randomMetricsConfigs(ESTestCase.random())); @@ -45,16 +45,11 @@ public class ConfigTestHelpers { return builder; } - public static GroupConfig.Builder getGroupConfig() { - GroupConfig.Builder groupBuilder = new GroupConfig.Builder(); - groupBuilder.setDateHisto(randomDateHistogramGroupConfig(ESTestCase.random())); - if (ESTestCase.randomBoolean()) { - groupBuilder.setHisto(randomHistogramGroupConfig(ESTestCase.random())); - } - if (ESTestCase.randomBoolean()) { - groupBuilder.setTerms(randomTermsGroupConfig(ESTestCase.random())); - } - return groupBuilder; + public static GroupConfig randomGroupConfig(final Random random) { + DateHistogramGroupConfig dateHistogram = randomDateHistogramGroupConfig(random); + HistogramGroupConfig histogram = random.nextBoolean() ? randomHistogramGroupConfig(random) : null; + TermsGroupConfig terms = random.nextBoolean() ? randomTermsGroupConfig(random) : null; + return new GroupConfig(dateHistogram, histogram, terms); } private static final String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m"}; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/GroupConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/GroupConfigSerializingTests.java index c220f10aeab..49ea206ded7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/GroupConfigSerializingTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/GroupConfigSerializingTests.java @@ -8,14 +8,16 @@ package org.elasticsearch.xpack.core.rollup.job; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import java.io.IOException; +import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomGroupConfig; + public class GroupConfigSerializingTests extends AbstractSerializingTestCase { + @Override - protected GroupConfig doParseInstance(XContentParser parser) throws IOException { - return GroupConfig.PARSER.apply(parser, null).build(); + protected GroupConfig doParseInstance(final XContentParser parser) throws IOException { + return GroupConfig.fromXContent(parser); } @Override @@ -25,6 +27,6 @@ public class GroupConfigSerializingTests extends AbstractSerializingTestCase histo -> terms if (groupConfig != null) { - builders.addAll(groupConfig.getDateHisto().toBuilders()); - metadata.putAll(groupConfig.getDateHisto().getMetadata()); - if (groupConfig.getHisto() != null) { - builders.addAll(groupConfig.getHisto().toBuilders()); - metadata.putAll(groupConfig.getHisto().getMetadata()); + builders.addAll(groupConfig.getDateHistogram().toBuilders()); + 
metadata.putAll(groupConfig.getDateHistogram().getMetadata()); + if (groupConfig.getHistogram() != null) { + builders.addAll(groupConfig.getHistogram().toBuilders()); + metadata.putAll(groupConfig.getHistogram().getMetadata()); } if (groupConfig.getTerms() != null) { builders.addAll(groupConfig.getTerms().toBuilders()); @@ -426,7 +426,7 @@ public abstract class RollupIndexer { */ private QueryBuilder createBoundaryQuery(Map position) { assert maxBoundary < Long.MAX_VALUE; - DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHisto(); + DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); String fieldName = dateHisto.getField(); String rollupFieldName = fieldName + "." + DateHistogramAggregationBuilder.NAME; long lowerBound = 0L; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java index 5467e11c5a0..24cb1dab0fa 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java @@ -36,14 +36,13 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testOneMatch() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval()); + .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval()); Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); assertThat(bestCaps.size(), equalTo(1)); @@ -51,9 +50,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testBiggerButCompatibleInterval() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = singletonSet(cap); @@ -66,9 +64,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testIncompatibleInterval() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = singletonSet(cap); @@ -82,9 +79,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public 
void testBadTimeZone() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST")); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST")); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = singletonSet(cap); @@ -99,9 +95,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testMetricOnlyAgg() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("max")))); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = singletonSet(cap); @@ -114,9 +109,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testOneOfTwoMatchingCaps() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = singletonSet(cap); @@ -131,21 +125,15 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testTwoJobsSameRollupIndex() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - group.setTerms(null); - group.setHisto(null); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = new HashSet<>(2); caps.add(cap); RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2"); - GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig(); - group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - group2.setTerms(null); - group2.setHisto(null); - job2.setGroupConfig(group.build()); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job2.setGroupConfig(group); job2.setRollupIndex(job.getRollupIndex()); RollupJobCaps cap2 = new RollupJobCaps(job2.build()); caps.add(cap2); @@ -161,18 +149,16 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testTwoJobsButBothPartialMatches() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new 
DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("max")))); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = new HashSet<>(2); caps.add(cap); RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2"); - GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig(); - group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job2.setGroupConfig(group.build()); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job2.setGroupConfig(group); job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("min")))); RollupJobCaps cap2 = new RollupJobCaps(job2.build()); caps.add(cap2); @@ -189,19 +175,13 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testComparableDifferentDateIntervals() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))) - .setHisto(null) - .setTerms(null); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex()); - GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig(); - group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))) - .setHisto(null) - .setTerms(null); - job2.setGroupConfig(group2.build()); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); + job2.setGroupConfig(group2); RollupJobCaps cap2 = new RollupJobCaps(job2.build()); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") @@ -218,19 +198,13 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testComparableDifferentDateIntervalsOnlyOneWorks() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))) - .setHisto(null) - .setTerms(null); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex()); - GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig(); - group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))) - .setHisto(null) - .setTerms(null); - job2.setGroupConfig(group2.build()); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); + job2.setGroupConfig(group2); RollupJobCaps cap2 = new RollupJobCaps(job2.build()); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") @@ -247,19 +221,14 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testComparableNoHistoVsHisto() { 
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))) - .setHisto(null) - .setTerms(null); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex()); - GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig(); - group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))) - .setHisto(new HistogramGroupConfig(100L, "bar")) - .setTerms(null); - job2.setGroupConfig(group2.build()); + final HistogramGroupConfig histoConfig = new HistogramGroupConfig(100L, "bar"); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), histoConfig, null); + job2.setGroupConfig(group2); RollupJobCaps cap2 = new RollupJobCaps(job2.build()); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") @@ -277,19 +246,14 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testComparableNoTermsVsTerms() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))) - .setHisto(null) - .setTerms(null); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex()); - GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig(); - group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))) - .setHisto(null) - .setTerms(new TermsGroupConfig("bar")); - job2.setGroupConfig(group2.build()); + final TermsGroupConfig termsConfig = new TermsGroupConfig("bar"); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), null, termsConfig); + job2.setGroupConfig(group2); RollupJobCaps cap2 = new RollupJobCaps(job2.build()); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") @@ -313,11 +277,12 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo") - .setGroupConfig(ConfigTestHelpers.getGroupConfig() + .setGroupConfig(new GroupConfig( // NOTE same name but wrong type - .setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID())) - .setHisto(new HistogramGroupConfig(1L, "baz")) // <-- NOTE right type but wrong name - .build()) + new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()), + new HistogramGroupConfig(1L, "baz"), // <-- NOTE right type but wrong name + null + )) .setMetricsConfig( Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg")))) .build(); @@ 
-336,9 +301,9 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo") - .setGroupConfig(ConfigTestHelpers.getGroupConfig() - .setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID())) - .build()) + .setGroupConfig(new GroupConfig( + new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()) + )) .setMetricsConfig( Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg")))) .build(); @@ -357,10 +322,10 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo") - .setGroupConfig(ConfigTestHelpers.getGroupConfig() + .setGroupConfig(new GroupConfig( // interval in job is much higher than agg interval above - .setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100d"), null, DateTimeZone.UTC.getID())) - .build()) + new DateHistogramGroupConfig("foo", new DateHistogramInterval("100d"), null, DateTimeZone.UTC.getID()) + )) .build(); Set caps = singletonSet(new RollupJobCaps(job)); @@ -377,10 +342,10 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo") - .setGroupConfig(ConfigTestHelpers.getGroupConfig() + .setGroupConfig(new GroupConfig( // NOTE different field from the one in the query - .setDateHisto(new DateHistogramGroupConfig("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID())) - .build()) + new DateHistogramGroupConfig("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()) + )) .setMetricsConfig( Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg")))) .build(); @@ -399,10 +364,11 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo") - .setGroupConfig(ConfigTestHelpers.getGroupConfig() - .setDateHisto(new DateHistogramGroupConfig("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID())) - .setHisto(new HistogramGroupConfig(1L, "baz")) // <-- NOTE right type but wrong name - .build()) + .setGroupConfig(new GroupConfig( + new DateHistogramGroupConfig("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()), + new HistogramGroupConfig(1L, "baz"), // <-- NOTE right type but wrong name + null + )) .setMetricsConfig( Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg")))) .build(); @@ -421,10 +387,11 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo") - .setGroupConfig(ConfigTestHelpers.getGroupConfig() - .setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID())) - .setHisto(new HistogramGroupConfig(1L, "baz")) // <-- NOTE right type but wrong name - .build()) + .setGroupConfig(new GroupConfig( + new DateHistogramGroupConfig("foo", new 
DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()), + new HistogramGroupConfig(1L, "baz"), // <-- NOTE right type but wrong name + null + )) .build(); Set caps = singletonSet(new RollupJobCaps(job)); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java index 12c88823e2d..6aec4d4f438 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java @@ -58,7 +58,7 @@ import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.elasticsearch.xpack.rollup.Rollup; import org.hamcrest.core.IsEqual; -import org.joda.time.DateTimeZone; +import org.junit.Before; import org.mockito.Mockito; import java.io.IOException; @@ -81,6 +81,9 @@ import static org.mockito.Mockito.when; public class SearchActionTests extends ESTestCase { private NamedWriteableRegistry namedWriteableRegistry; + + @Override + @Before public void setUp() throws Exception { super.setUp(); IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); @@ -119,9 +122,8 @@ public class SearchActionTests extends ESTestCase { public void testRange() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(groupConfig); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = new HashSet<>(); caps.add(cap); @@ -132,9 +134,8 @@ public class SearchActionTests extends ESTestCase { public void testRangeNullTimeZone() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = new HashSet<>(); caps.add(cap); @@ -145,9 +146,8 @@ public class SearchActionTests extends ESTestCase { public void testRangeWrongTZ() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = new HashSet<>(); caps.add(cap); @@ -159,9 +159,9 @@ public class SearchActionTests extends ESTestCase { public void testTermQuery() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setTerms(new TermsGroupConfig("foo")); - job.setGroupConfig(group.build()); + final TermsGroupConfig termsConfig = new TermsGroupConfig("foo"); + final 
GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("date", new DateHistogramInterval("1h")), null, termsConfig); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = new HashSet<>(); caps.add(cap); @@ -172,9 +172,9 @@ public class SearchActionTests extends ESTestCase { public void testTermsQuery() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setTerms(new TermsGroupConfig("foo")); - job.setGroupConfig(group.build()); + final TermsGroupConfig termsConfig = new TermsGroupConfig("foo"); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("date", new DateHistogramInterval("1h")), null, termsConfig); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = new HashSet<>(); caps.add(cap); @@ -189,9 +189,8 @@ public class SearchActionTests extends ESTestCase { public void testCompounds() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(groupConfig); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = new HashSet<>(); caps.add(cap); @@ -205,9 +204,8 @@ public class SearchActionTests extends ESTestCase { public void testMatchAll() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(groupConfig); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = new HashSet<>(); caps.add(cap); @@ -217,10 +215,9 @@ public class SearchActionTests extends ESTestCase { public void testAmbiguousResolution() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - group.setTerms(new TermsGroupConfig("foo")); - job.setGroupConfig(group.build()); + final TermsGroupConfig termsConfig = new TermsGroupConfig("foo"); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), null, termsConfig); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = new HashSet<>(); caps.add(cap); @@ -368,9 +365,8 @@ public class SearchActionTests extends ESTestCase { public void testGood() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); - job.setGroupConfig(group.build()); + final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(groupConfig); RollupJobCaps cap = new RollupJobCaps(job.build()); Set caps = singletonSet(cap); @@ -385,7 +381,7 @@ public class SearchActionTests extends ESTestCase { 
source.query(getQueryBuilder(1)); source.size(0); source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval())); + .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval())); SearchRequest request = new SearchRequest(combinedIndices, source); MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx); @@ -414,9 +410,7 @@ public class SearchActionTests extends ESTestCase { SearchRequest request = new SearchRequest(combinedIndices, source); RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo") - .setGroupConfig(ConfigTestHelpers.getGroupConfig() - .setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID())) - .build()) + .setGroupConfig(new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, "UTC"))) .build(); Set caps = singletonSet(new RollupJobCaps(job)); @@ -439,15 +433,12 @@ public class SearchActionTests extends ESTestCase { public void testTwoMatchingJobs() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))) - .setHisto(null) - .setTerms(null); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex()); - job2.setGroupConfig(group.build()); + job2.setGroupConfig(group); // so that the jobs aren't exactly equal job2.setMetricsConfig(ConfigTestHelpers.randomMetricsConfigs(random())); @@ -468,7 +459,7 @@ public class SearchActionTests extends ESTestCase { source.query(getQueryBuilder(1)); source.size(0); source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval())); + .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval())); SearchRequest request = new SearchRequest(combinedIndices, source); MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx); @@ -489,19 +480,13 @@ public class SearchActionTests extends ESTestCase { public void testTwoMatchingJobsOneBetter() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); - GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); - group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))) - .setHisto(null) - .setTerms(null); - job.setGroupConfig(group.build()); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + job.setGroupConfig(group); RollupJobCaps cap = new RollupJobCaps(job.build()); RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex()); - GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig(); - group2.setDateHisto(group.getDateHisto()) - .setHisto(randomHistogramGroupConfig(random())) - .setTerms(null); - job2.setGroupConfig(group2.build()); + final GroupConfig group2 = new GroupConfig(group.getDateHistogram(), randomHistogramGroupConfig(random()), null); + 
job2.setGroupConfig(group2); RollupJobCaps cap2 = new RollupJobCaps(job2.build()); Set caps = new HashSet<>(2); @@ -519,7 +504,7 @@ public class SearchActionTests extends ESTestCase { source.query(getQueryBuilder(1)); source.size(0); source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval())); + .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval())); SearchRequest request = new SearchRequest(combinedIndices, source); MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java index b8c11971111..6c4f2cabfa9 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java @@ -22,6 +22,8 @@ import java.util.Map; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomHistogramGroupConfig; +import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomTermsGroupConfig; import static org.hamcrest.Matchers.equalTo; //TODO split this into dedicated unit test classes (one for each config object) public class ConfigTests extends ESTestCase { @@ -43,22 +45,14 @@ public class ConfigTests extends ESTestCase { } public void testEmptyGroup() { - GroupConfig.Builder groupConfig = ConfigTestHelpers.getGroupConfig(); - groupConfig.setDateHisto(null); - groupConfig.setTerms(null); - groupConfig.setHisto(null); - - Exception e = expectThrows(IllegalArgumentException.class, groupConfig::build); - assertThat(e.getMessage(), equalTo("A date_histogram group is mandatory")); + Exception e = expectThrows(IllegalArgumentException.class, () -> new GroupConfig(null, null, null)); + assertThat(e.getMessage(), equalTo("Date histogram must not be null")); } public void testNoDateHisto() { - GroupConfig.Builder groupConfig = new GroupConfig.Builder(); - groupConfig.setTerms(ConfigTestHelpers.randomTermsGroupConfig(random())); - groupConfig.setHisto(ConfigTestHelpers.randomHistogramGroupConfig(random())); - - Exception e = expectThrows(IllegalArgumentException.class, groupConfig::build); - assertThat(e.getMessage(), equalTo("A date_histogram group is mandatory")); + Exception e = expectThrows(IllegalArgumentException.class, + () -> new GroupConfig(null, randomHistogramGroupConfig(random()), randomTermsGroupConfig(random()))); + assertThat(e.getMessage(), equalTo("Date histogram must not be null")); } public void testEmptyGroupAndMetrics() { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java index 51a53db713b..e8c66f7e8c1 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggre import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; import 
org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; -import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; @@ -54,8 +53,10 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomHistogramGroupConfig; import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomDateHistogramGroupConfig; +import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomGroupConfig; +import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomHistogramGroupConfig; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -112,8 +113,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { indexReader.close(); directory.close(); - List docs = IndexerUtils.processBuckets(composite, indexName, stats, - ConfigTestHelpers.getGroupConfig().build(), "foo", randomBoolean()); + final GroupConfig groupConfig = randomGroupConfig(random()); + List docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean()); assertThat(docs.size(), equalTo(numDocs)); for (IndexRequest doc : docs) { @@ -179,8 +180,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { indexReader.close(); directory.close(); - List docs = IndexerUtils.processBuckets(composite, indexName, stats, - ConfigTestHelpers.getGroupConfig().build(), "foo", randomBoolean()); + final GroupConfig groupConfig = randomGroupConfig(random()); + List docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean()); assertThat(docs.size(), equalTo(numDocs)); for (IndexRequest doc : docs) { @@ -235,8 +236,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { indexReader.close(); directory.close(); - List docs = IndexerUtils.processBuckets(composite, indexName, stats, - ConfigTestHelpers.getGroupConfig().build(), "foo", randomBoolean()); + final GroupConfig groupConfig = randomGroupConfig(random()); + List docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean()); assertThat(docs.size(), equalTo(numDocs)); for (IndexRequest doc : docs) { @@ -301,8 +302,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { indexReader.close(); directory.close(); - List docs = IndexerUtils.processBuckets(composite, indexName, stats, - ConfigTestHelpers.getGroupConfig().build(), "foo", randomBoolean()); + final GroupConfig groupConfig = randomGroupConfig(random()); + List docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean()); assertThat(docs.size(), equalTo(numDocs)); for (IndexRequest doc : docs) { @@ -353,11 +354,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { // The content of the config don't actually matter for this test // because the test is just looking at agg keys - GroupConfig.Builder groupConfig = ConfigTestHelpers.getGroupConfig(); - groupConfig.setHisto(new HistogramGroupConfig(123L, "abc")); - - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), - groupConfig.build(), "foo", false); + 
GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(123L, "abc"), null); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", false); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("1237859798")); } @@ -400,11 +398,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { return foos; }); - GroupConfig.Builder groupConfig = ConfigTestHelpers.getGroupConfig(); - groupConfig.setHisto(new HistogramGroupConfig(1, "abc")); - - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), - groupConfig.build(), "foo", true); + GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1L, "abc"), null); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("foo$c9LcrFqeFW92uN_Z7sv1hA")); } @@ -453,11 +448,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { return foos; }); - GroupConfig.Builder groupConfig = ConfigTestHelpers.getGroupConfig(); - groupConfig.setHisto(new HistogramGroupConfig(1, "abc")); - - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), - groupConfig.build(), "foo", true); + GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1, "abc"), null); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("foo$VAFKZpyaEqYRPLyic57_qw")); } @@ -483,11 +475,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { return foos; }); - GroupConfig.Builder groupConfig = ConfigTestHelpers.getGroupConfig(); - groupConfig.setHisto(randomHistogramGroupConfig(random())); - - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), - groupConfig.build(), "foo", randomBoolean()); + GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), randomHistogramGroupConfig(random()), null); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", randomBoolean()); assertThat(docs.size(), equalTo(1)); assertFalse(Strings.isNullOrEmpty(docs.get(0).id())); } @@ -548,8 +537,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { indexReader.close(); directory.close(); - List docs = IndexerUtils.processBuckets(composite, indexName, stats, - ConfigTestHelpers.getGroupConfig().build(), "foo", randomBoolean()); + final GroupConfig groupConfig = randomGroupConfig(random()); + List docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean()); assertThat(docs.size(), equalTo(6)); for (IndexRequest doc : docs) { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 5799eb401f6..21a834f4b57 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -96,8 +96,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { String rollupIndex = randomAlphaOfLength(10); String 
field = "the_histo"; DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(field, new DateHistogramInterval("1ms")); - RollupJobConfig job = createJob(rollupIndex, new GroupConfig.Builder().setDateHisto(dateHistoConfig).build(), - Collections.emptyList()); + RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList()); final List> dataset = new ArrayList<>(); dataset.addAll( Arrays.asList( @@ -142,8 +141,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { String field = "the_histo"; DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(field, new DateHistogramInterval("1h")); MetricConfig config = new MetricConfig("counter", Arrays.asList("avg", "sum", "max", "min")); - RollupJobConfig job = createJob(rollupIndex, new GroupConfig.Builder().setDateHisto(dateHistoConfig).build(), - Collections.singletonList(config)); + RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.singletonList(config)); final List> dataset = new ArrayList<>(); dataset.addAll( Arrays.asList( @@ -265,8 +263,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { String field = "the_histo"; DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(field, new DateHistogramInterval("1m"), new DateHistogramInterval("1h"), null); - RollupJobConfig job = createJob(rollupIndex, new GroupConfig.Builder().setDateHisto(dateHistoConfig).build(), - Collections.emptyList()); + RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList()); final List> dataset = new ArrayList<>(); long now = System.currentTimeMillis(); dataset.addAll( @@ -347,8 +344,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { String rollupIndex = randomAlphaOfLengthBetween(5, 10); String field = "the_histo"; DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(field, new DateHistogramInterval("1d"), null, timeZone); - RollupJobConfig job = createJob(rollupIndex, new GroupConfig.Builder().setDateHisto(dateHistoConfig).build(), - Collections.emptyList()); + RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList()); executeTestCase(dataset, job, now, (resp) -> { assertThat(resp.size(), equalTo(1)); @@ -410,8 +406,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(timestampField, new DateHistogramInterval(timeInterval)); MetricConfig metricConfig = new MetricConfig(valueField, Collections.singletonList("avg")); - RollupJobConfig job = createJob(rollupIndex, new GroupConfig.Builder().setDateHisto(dateHistoConfig).build(), - Collections.singletonList(metricConfig)); + RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.singletonList(metricConfig)); final List> dataset = new ArrayList<>(); int numDocs = randomIntBetween(1,100); @@ -477,7 +472,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { Directory dir = index(docs, fieldTypeLookup); IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = new IndexSearcher(reader); - String dateHistoField = config.getGroupConfig().getDateHisto().getField(); + String dateHistoField = config.getGroupConfig().getDateHistogram().getField(); final ExecutorService executor = Executors.newFixedThreadPool(1); try { RollupJob job = new RollupJob(config, 
Collections.emptyMap()); @@ -499,14 +494,14 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { */ private Map createFieldTypes(RollupJobConfig job) { Map fieldTypes = new HashMap<>(); - MappedFieldType fieldType = new DateFieldMapper.Builder(job.getGroupConfig().getDateHisto().getField()) + MappedFieldType fieldType = new DateFieldMapper.Builder(job.getGroupConfig().getDateHistogram().getField()) .dateTimeFormatter(Joda.forPattern(randomFrom("basic_date", "date_optional_time", "epoch_second"))) .build(new Mapper.BuilderContext(settings.getSettings(), new ContentPath(0))) .fieldType(); fieldTypes.put(fieldType.name(), fieldType); - if (job.getGroupConfig().getHisto() != null) { - for (String field : job.getGroupConfig().getHisto().getFields()) { + if (job.getGroupConfig().getHistogram() != null) { + for (String field : job.getGroupConfig().getHistogram().getFields()) { MappedFieldType ft = new NumberFieldMapper.Builder(field, NumberFieldMapper.NumberType.LONG) .build(new Mapper.BuilderContext(settings.getSettings(), new ContentPath(0))) .fieldType(); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index c645a0e3005..733a784b843 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -273,7 +273,7 @@ public class RollupIndexerStateTests extends ESTestCase { // and make sure the appropriate error is thrown when(config.getGroupConfig()).then((Answer) invocationOnMock -> { state.set(IndexerState.STOPPED); - return ConfigTestHelpers.getGroupConfig().build(); + return ConfigTestHelpers.randomGroupConfig(random()); }); RollupJob job = new RollupJob(config, Collections.emptyMap()); From f57cb10d2c6f3a99bb54945fe7b8a55a255473da Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 7 Aug 2018 10:30:45 +0200 Subject: [PATCH 12/16] Tests: Fix Typo Causing Flaky Settings Test (#32665) * We were comparing the wrong timeout value in the `randomValueOtherThan` call here, leading to no mutation happening for a certain seed * closes #32639 --- .../settings/put/UpdateSettingsRequestStreamableTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java index e8dd3943cb7..83ddb456551 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java @@ -41,7 +41,7 @@ public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTest List mutators = new ArrayList<>(); mutators.add(() -> mutation .masterNodeTimeout(randomValueOtherThan(request.masterNodeTimeout().getStringRep(), ESTestCase::randomTimeValue))); - mutators.add(() -> mutation.timeout(randomValueOtherThan(request.masterNodeTimeout().getStringRep(), ESTestCase::randomTimeValue))); + mutators.add(() -> mutation.timeout(randomValueOtherThan(request.timeout().getStringRep(), ESTestCase::randomTimeValue))); mutators.add(() -> 
mutation.settings(mutateSettings(request.settings()))); mutators.add(() -> mutation.indices(mutateIndices(request.indices()))); mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(request.indicesOptions(), From ab81078949fadd519a0a36cb5cf9b45ba05b2f85 Mon Sep 17 00:00:00 2001 From: simonzheng Date: Tue, 7 Aug 2018 04:33:46 -0400 Subject: [PATCH 13/16] =?UTF-8?q?[Docs]=C2=A0Correct=20a=20small=20typo=20?= =?UTF-8?q?(#32655)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/reference/setup/important-settings/node-name.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/setup/important-settings/node-name.asciidoc b/docs/reference/setup/important-settings/node-name.asciidoc index fab7ddcf118..5980d8e284e 100644 --- a/docs/reference/setup/important-settings/node-name.asciidoc +++ b/docs/reference/setup/important-settings/node-name.asciidoc @@ -2,7 +2,7 @@ === `node.name` By default, Elasticsearch will use the first seven characters of the randomly -generated UUID as the node id.Note that the node id is persisted and does +generated UUID as the node id. Note that the node id is persisted and does not change when a node restarts and therefore the default node name will also not change. @@ -19,4 +19,4 @@ The `node.name` can also be set to the server's HOSTNAME as follows: [source,yaml] -------------------------------------------------- node.name: ${HOSTNAME} --------------------------------------------------- \ No newline at end of file +-------------------------------------------------- From 6fe6247dc80ebf3da6fdd522bff779d73b1d192f Mon Sep 17 00:00:00 2001 From: Parth Verma Date: Tue, 7 Aug 2018 14:26:44 +0530 Subject: [PATCH 14/16] Ignore script fields when size is 0 (#31917) This change adds a check so that when parsing the search source, script fields are ignored when the requested search result size is 0. This helps with e.g. clients like Kibana that send a list of script fields that they may need for convenience, but don't require any hits. Before this change, users sometimes ran into confusing behaviour, e.g. the script compilation limit being reached although no hits were requested.
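For illustration only (this snippet is not part of the change; the field names and counts are made up), this is the shape of request described above: an aggregation-style search with size 0 that still carries script fields.

    // imports assumed: org.elasticsearch.search.builder.SearchSourceBuilder, org.elasticsearch.script.Script
    SearchSourceBuilder source = new SearchSourceBuilder().size(0);           // no hits requested
    for (int i = 0; i < 50; i++) {                                            // assume more fields than index.max_script_fields allows
        source.scriptField("field" + i, new Script("params._source.value"));
    }
    // Before this change: the script fields were still compiled and checked against index.max_script_fields,
    // so the request could fail even though no hits were asked for.
    // After this change: the script fields are skipped entirely because size is 0.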
Closes #31824 --- .../org/elasticsearch/search/SearchService.java | 2 +- .../search/SearchServiceTests.java | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 59af043e0cf..4bf5e03b8a7 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -806,7 +806,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv throw new SearchContextException(context, "failed to create SearchContextHighlighter", e); } } - if (source.scriptFields() != null) { + if (source.scriptFields() != null && source.size() != 0) { int maxAllowedScriptFields = context.mapperService().getIndexSettings().getMaxScriptFields(); if (source.scriptFields().size() > maxAllowedScriptFields) { throw new IllegalArgumentException( diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index c58a158fc67..2562683466a 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -345,6 +345,23 @@ public class SearchServiceTests extends ESSingleNodeTestCase { } } + public void testIgnoreScriptfieldIfSizeZero() throws IOException { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.scriptField("field" + 0, + new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); + searchSourceBuilder.size(0); + try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true, null, null))) { + assertEquals(0, context.scriptFields().fields().size()); + } + } + public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin { @Override public List> getQueries() { From 1f50950099fa156269d56bd78378f6732527059b Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 7 Aug 2018 13:21:39 +0300 Subject: [PATCH 15/16] Add @AwaitsFix for #32673 --- .../xpack/monitoring/exporter/http/HttpExporterSslIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java index a0511dc17aa..52f2a3b1d10 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java @@ -134,6 +134,7 @@ public class HttpExporterSslIT extends MonitoringIntegTestCase { clearTransientSettings("plaintext"); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32673") public void 
testCanAddNewExporterWithSsl() { Path truststore = getDataPath("/org/elasticsearch/xpack/monitoring/exporter/http/testnode.jks"); assertThat(Files.exists(truststore), CoreMatchers.is(true)); From 6449d9bc1438ce00d8e34aee4589228301def059 Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Tue, 7 Aug 2018 13:03:43 +0200 Subject: [PATCH 16/16] Include translog path in error message when translog is corrupted (#32251) Currently, when TranslogCorruptedException is thrown, most of the time it does not contain information about the translog location on the file system. There is a translog recovery tool that accepts the translog path as an argument, and users are constantly puzzled about where to get that path. This pull request adds "source" information to every TranslogCorruptedException thrown. The source can be a local file, a remote translog source (used for recovery), an assertion (a translog entry constructed to perform some assertion), or a translog constructed inside a test (a short illustrative sketch of the resulting message follows the test changes below). Closes #24929 --- .../index/translog/BaseTranslogReader.java | 12 +++-- .../translog/BufferedChecksumStreamInput.java | 18 ++++--- .../index/translog/Translog.java | 16 +++--- .../translog/TranslogCorruptedException.java | 22 ++++++-- .../index/translog/TranslogHeader.java | 20 ++++--- .../index/translog/TranslogWriter.java | 9 ++-- .../translog/TruncatedTranslogException.java | 9 ++-- .../RecoveryTranslogOperationsRequest.java | 2 +- .../index/store/CorruptedTranslogIT.java | 12 +++-- .../index/translog/TestTranslog.java | 52 +++++++------------ .../index/translog/TranslogTests.java | 50 ++++++------------ .../index/translog/TruncateTranslogIT.java | 14 ++--- 12 files changed, 121 insertions(+), 115 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java index ff226ae00be..41c3252eab0 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java @@ -79,7 +79,9 @@ public abstract class BaseTranslogReader implements Comparable maxSize) { - throw new TranslogCorruptedException("operation size is corrupted must be [0.." + maxSize + "] but was: " + size); + throw new TranslogCorruptedException( path.toString(), "operation size is corrupted must be [0..
+ maxSize + "] but was: " + size); } return size; } @@ -103,14 +105,16 @@ public abstract class BaseTranslogReader implements Comparable getPrimaryTerm() && getPrimaryTerm() != TranslogHeader.UNKNOWN_PRIMARY_TERM) { - throw new TranslogCorruptedException("Operation's term is newer than translog header term; " + - "operation term[" + op.primaryTerm() + "], translog header term [" + getPrimaryTerm() + "]"); + throw new TranslogCorruptedException( + path.toString(), + "operation's term is newer than translog header term; " + + "operation term[" + op.primaryTerm() + "], translog header term [" + getPrimaryTerm() + "]"); } return op; } diff --git a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java index 37740b460b7..8e815d3599a 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java @@ -35,14 +35,11 @@ public final class BufferedChecksumStreamInput extends FilterStreamInput { private static final int SKIP_BUFFER_SIZE = 1024; private byte[] skipBuffer; private final Checksum digest; + private final String source; - public BufferedChecksumStreamInput(StreamInput in) { - super(in); - this.digest = new BufferedChecksum(new CRC32()); - } - - public BufferedChecksumStreamInput(StreamInput in, BufferedChecksumStreamInput reuse) { + public BufferedChecksumStreamInput(StreamInput in, String source, BufferedChecksumStreamInput reuse) { super(in); + this.source = source; if (reuse == null ) { this.digest = new BufferedChecksum(new CRC32()); } else { @@ -52,6 +49,10 @@ public final class BufferedChecksumStreamInput extends FilterStreamInput { } } + public BufferedChecksumStreamInput(StreamInput in, String source) { + this(in, source, null); + } + public long getChecksum() { return this.digest.getValue(); } @@ -85,7 +86,6 @@ public final class BufferedChecksumStreamInput extends FilterStreamInput { return delegate.markSupported(); } - @Override public long skip(long numBytes) throws IOException { if (numBytes < 0) { @@ -104,7 +104,6 @@ public final class BufferedChecksumStreamInput extends FilterStreamInput { return skipped; } - @Override public synchronized void mark(int readlimit) { delegate.mark(readlimit); @@ -114,4 +113,7 @@ public final class BufferedChecksumStreamInput extends FilterStreamInput { digest.reset(); } + public String getSource(){ + return source; + } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 04744bc68c4..e426b3a7253 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -1427,7 +1427,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC long expectedChecksum = in.getChecksum(); long readChecksum = Integer.toUnsignedLong(in.readInt()); if (readChecksum != expectedChecksum) { - throw new TranslogCorruptedException("translog stream is corrupted, expected: 0x" + + throw new TranslogCorruptedException(in.getSource(), "checksum verification failed - expected: 0x" + Long.toHexString(expectedChecksum) + ", got: 0x" + Long.toHexString(readChecksum)); } } @@ -1435,10 +1435,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC /** * Reads a list of operations 
written with {@link #writeOperations(StreamOutput, List)} */ - public static List readOperations(StreamInput input) throws IOException { + public static List readOperations(StreamInput input, String source) throws IOException { ArrayList operations = new ArrayList<>(); int numOps = input.readInt(); - final BufferedChecksumStreamInput checksumStreamInput = new BufferedChecksumStreamInput(input); + final BufferedChecksumStreamInput checksumStreamInput = new BufferedChecksumStreamInput(input, source); for (int i = 0; i < numOps; i++) { operations.add(readOperation(checksumStreamInput)); } @@ -1450,7 +1450,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC try { final int opSize = in.readInt(); if (opSize < 4) { // 4byte for the checksum - throw new TranslogCorruptedException("operation size must be at least 4 but was: " + opSize); + throw new TranslogCorruptedException(in.getSource(), "operation size must be at least 4 but was: " + opSize); } in.resetDigest(); // size is not part of the checksum! if (in.markSupported()) { // if we can we validate the checksum first @@ -1465,17 +1465,15 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } operation = Translog.Operation.readOperation(in); verifyChecksum(in); - } catch (TranslogCorruptedException e) { - throw e; } catch (EOFException e) { - throw new TruncatedTranslogException("reached premature end of file, translog is truncated", e); + throw new TruncatedTranslogException(in.getSource(), "reached premature end of file, translog is truncated", e); } return operation; } /** * Writes all operations in the given iterable to the given output stream including the size of the array - * use {@link #readOperations(StreamInput)} to read it back. + * use {@link #readOperations(StreamInput, String)} to read it back. */ public static void writeOperations(StreamOutput outStream, List toWrite) throws IOException { final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(BigArrays.NON_RECYCLING_INSTANCE); @@ -1716,7 +1714,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } catch (TranslogCorruptedException ex) { throw ex; // just bubble up. 
} catch (Exception ex) { - throw new TranslogCorruptedException("Translog at [" + location + "] is corrupted", ex); + throw new TranslogCorruptedException(location.toString(), ex); } return checkpoint; } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogCorruptedException.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogCorruptedException.java index 07700b3037c..ab1a48b2167 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogCorruptedException.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogCorruptedException.java @@ -25,15 +25,27 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; public class TranslogCorruptedException extends ElasticsearchException { - public TranslogCorruptedException(String msg) { - super(msg); + public TranslogCorruptedException(String source, String details) { + super(corruptedMessage(source, details)); } - public TranslogCorruptedException(String msg, Throwable cause) { - super(msg, cause); + public TranslogCorruptedException(String source, Throwable cause) { + this(source, null, cause); } - public TranslogCorruptedException(StreamInput in) throws IOException{ + public TranslogCorruptedException(String source, String details, Throwable cause) { + super(corruptedMessage(source, details), cause); + } + + private static String corruptedMessage(String source, String details) { + String msg = "translog from source [" + source + "] is corrupted"; + if (details != null) { + msg += ", " + details; + } + return msg; + } + + public TranslogCorruptedException(StreamInput in) throws IOException { super(in); } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java index 0fde24d8bb4..20aadf21bcb 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java @@ -110,13 +110,15 @@ final class TranslogHeader { static TranslogHeader read(final String translogUUID, final Path path, final FileChannel channel) throws IOException { // This input is intentionally not closed because closing it will close the FileChannel. final BufferedChecksumStreamInput in = - new BufferedChecksumStreamInput(new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel), channel.size())); + new BufferedChecksumStreamInput( + new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel), channel.size()), + path.toString()); final int version; try { version = CodecUtil.checkHeader(new InputStreamDataInput(in), TRANSLOG_CODEC, VERSION_CHECKSUMS, VERSION_PRIMARY_TERM); } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) { tryReportOldVersionError(path, channel); - throw new TranslogCorruptedException("Translog header corrupted. 
path:" + path, e); + throw new TranslogCorruptedException(path.toString(), "translog header corrupted", e); } if (version == VERSION_CHECKSUMS) { throw new IllegalStateException("pre-2.0 translog found [" + path + "]"); @@ -124,15 +126,19 @@ final class TranslogHeader { // Read the translogUUID final int uuidLen = in.readInt(); if (uuidLen > channel.size()) { - throw new TranslogCorruptedException("uuid length can't be larger than the translog"); + throw new TranslogCorruptedException( + path.toString(), + "UUID length can't be larger than the translog"); } final BytesRef uuid = new BytesRef(uuidLen); uuid.length = uuidLen; in.read(uuid.bytes, uuid.offset, uuid.length); final BytesRef expectedUUID = new BytesRef(translogUUID); if (uuid.bytesEquals(expectedUUID) == false) { - throw new TranslogCorruptedException("expected shard UUID " + expectedUUID + " but got: " + uuid + - " this translog file belongs to a different translog. path:" + path); + throw new TranslogCorruptedException( + path.toString(), + "expected shard UUID " + expectedUUID + " but got: " + uuid + + " this translog file belongs to a different translog"); } // Read the primary term final long primaryTerm; @@ -164,7 +170,9 @@ final class TranslogHeader { // 0x00 => version 0 of the translog final byte b1 = Channels.readFromFileChannel(channel, 0, 1)[0]; if (b1 == 0x3f) { // LUCENE_CODEC_HEADER_BYTE - throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header. path:" + path); + throw new TranslogCorruptedException( + path.toString(), + "translog looks like version 1 or later, but has corrupted header" ); } else if (b1 == 0x00) { // UNVERSIONED_TRANSLOG_HEADER_BYTE throw new IllegalStateException("pre-1.4 translog found [" + path + "]"); } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index c135facc67f..b779644cd5c 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -200,8 +200,10 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { } else if (seenSequenceNumbers.containsKey(seqNo)) { final Tuple previous = seenSequenceNumbers.get(seqNo); if (previous.v1().equals(data) == false) { - Translog.Operation newOp = Translog.readOperation(new BufferedChecksumStreamInput(data.streamInput())); - Translog.Operation prvOp = Translog.readOperation(new BufferedChecksumStreamInput(previous.v1().streamInput())); + Translog.Operation newOp = Translog.readOperation( + new BufferedChecksumStreamInput(data.streamInput(), "assertion")); + Translog.Operation prvOp = Translog.readOperation( + new BufferedChecksumStreamInput(previous.v1().streamInput(), "assertion")); if (newOp.equals(prvOp) == false) { throw new AssertionError( "seqNo [" + seqNo + "] was processed twice in generation [" + generation + "], with different data. 
" + @@ -220,7 +222,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { .forEach(e -> { final Translog.Operation op; try { - op = Translog.readOperation(new BufferedChecksumStreamInput(e.getValue().v1().streamInput())); + op = Translog.readOperation( + new BufferedChecksumStreamInput(e.getValue().v1().streamInput(), "assertion")); } catch (IOException ex) { throw new RuntimeException(ex); } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncatedTranslogException.java b/server/src/main/java/org/elasticsearch/index/translog/TruncatedTranslogException.java index e04eb58068d..5e0be02b7fc 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncatedTranslogException.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncatedTranslogException.java @@ -25,11 +25,12 @@ import java.io.IOException; public class TruncatedTranslogException extends TranslogCorruptedException { - public TruncatedTranslogException(String msg, Throwable cause) { - super(msg, cause); - } - public TruncatedTranslogException(StreamInput in) throws IOException { super(in); } + + public TruncatedTranslogException(String source, String details, Throwable cause) { + super(source, details, cause); + } + } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java index 46494626920..be399e0f81f 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java @@ -66,7 +66,7 @@ public class RecoveryTranslogOperationsRequest extends TransportRequest { super.readFrom(in); recoveryId = in.readLong(); shardId = ShardId.readShardId(in); - operations = Translog.readOperations(in); + operations = Translog.readOperations(in, "recovery"); totalTranslogOps = in.readVInt(); } diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index 9cc6d86bc2f..7d548fc42d6 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -44,6 +44,7 @@ import org.elasticsearch.test.engine.MockEngineSupport; import org.elasticsearch.test.transport.MockTransportService; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; @@ -86,7 +87,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase { indexRandom(false, false, false, Arrays.asList(builders)); // this one // Corrupt the translog file(s) - corruptRandomTranslogFiles(); + corruptRandomTranslogFile(); // Restart the single node internalCluster().fullRestart(); @@ -102,7 +103,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase { } - private void corruptRandomTranslogFiles() throws IOException { + private void corruptRandomTranslogFile() throws IOException { ClusterState state = client().admin().cluster().prepareState().get().getState(); GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false); final Index test = state.metaData().index("test").getIndex(); @@ -119,9 +120,12 @@ public class CorruptedTranslogIT extends ESIntegTestCase { String path = fsPath.getPath(); 
String relativeDataLocationPath = "indices/" + test.getUUID() + "/" + Integer.toString(shardRouting.getId()) + "/translog"; Path translogDir = PathUtils.get(path).resolve(relativeDataLocationPath); - translogDirs.add(translogDir); + if (Files.isDirectory(translogDir)) { + translogDirs.add(translogDir); + } } - TestTranslog.corruptTranslogFiles(logger, random(), translogDirs); + Path translogDir = RandomPicks.randomFrom(random(), translogDirs); + TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir, TestTranslog.minTranslogGenUsedInRecovery(translogDir)); } /** Disables translog flushing for the specified index */ diff --git a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java index 7ab9fa67330..f37ec5a8e55 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java @@ -34,8 +34,6 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.Collection; -import java.util.HashSet; import java.util.List; import java.util.Random; import java.util.Set; @@ -45,7 +43,8 @@ import java.util.regex.Pattern; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.not; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsNot.not; /** * Helpers for testing translog. @@ -54,44 +53,33 @@ public class TestTranslog { static final Pattern TRANSLOG_FILE_PATTERN = Pattern.compile("translog-(\\d+)\\.tlog"); /** - * Corrupts some translog files (translog-N.tlog) from the given translog directories. + * Corrupts random translog file (translog-N.tlog) from the given translog directory. * - * @return a collection of tlog files that have been corrupted. + * @return a translog file which has been corrupted. */ - public static Set corruptTranslogFiles(Logger logger, Random random, Collection translogDirs) throws IOException { + public static Path corruptRandomTranslogFile(Logger logger, Random random, Path translogDir, long minGeneration) throws + IOException { Set candidates = new TreeSet<>(); // TreeSet makes sure iteration order is deterministic - for (Path translogDir : translogDirs) { - if (Files.isDirectory(translogDir)) { - final long minUsedTranslogGen = minTranslogGenUsedInRecovery(translogDir); - logger.info("--> Translog dir [{}], minUsedTranslogGen [{}]", translogDir, minUsedTranslogGen); - try (DirectoryStream stream = Files.newDirectoryStream(translogDir)) { - for (Path item : stream) { - if (Files.isRegularFile(item)) { - // Makes sure that we will corrupt tlog files that are referenced by the Checkpoint. 
- final Matcher matcher = TRANSLOG_FILE_PATTERN.matcher(item.getFileName().toString()); - if (matcher.matches() && Long.parseLong(matcher.group(1)) >= minUsedTranslogGen) { - candidates.add(item); - } - } + logger.info("--> Translog dir [{}], minUsedTranslogGen [{}]", translogDir, minGeneration); + try (DirectoryStream stream = Files.newDirectoryStream(translogDir)) { + for (Path item : stream) { + if (Files.isRegularFile(item)) { + final Matcher matcher = TRANSLOG_FILE_PATTERN.matcher(item.getFileName().toString()); + if (matcher.matches() && Long.parseLong(matcher.group(1)) >= minGeneration) { + candidates.add(item); } } } } + assertThat(candidates, is(not(empty()))); - Set corruptedFiles = new HashSet<>(); - if (!candidates.isEmpty()) { - int corruptions = RandomNumbers.randomIntBetween(random, 5, 20); - for (int i = 0; i < corruptions; i++) { - Path fileToCorrupt = RandomPicks.randomFrom(random, candidates); - corruptFile(logger, random, fileToCorrupt); - corruptedFiles.add(fileToCorrupt); - } - } - assertThat("no translog file corrupted", corruptedFiles, not(empty())); - return corruptedFiles; + Path corruptedFile = RandomPicks.randomFrom(random, candidates); + corruptFile(logger, random, corruptedFile); + return corruptedFile; } - static void corruptFile(Logger logger, Random random, Path fileToCorrupt) throws IOException { + + static void corruptFile(Logger logger, Random random, Path fileToCorrupt) throws IOException { try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) { // read raf.position(RandomNumbers.randomLongBetween(random, 0, raf.size() - 1)); @@ -117,7 +105,7 @@ public class TestTranslog { /** * Lists all existing commits in a given index path, then read the minimum translog generation that will be used in recoverFromTranslog. 
*/ - private static long minTranslogGenUsedInRecovery(Path translogPath) throws IOException { + public static long minTranslogGenUsedInRecovery(Path translogPath) throws IOException { try (NIOFSDirectory directory = new NIOFSDirectory(translogPath.getParent().resolve("index"))) { List commits = DirectoryReader.listCommits(directory); final String translogUUID = commits.get(commits.size() - 1).getUserData().get(Translog.TRANSLOG_UUID_KEY); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index dc0d871a7f2..1c27a59e0ec 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -748,7 +748,9 @@ public class TranslogTests extends ESTestCase { } - public void testTranslogChecksums() throws Exception { + public void testTranslogCorruption() throws Exception { + TranslogConfig config = translog.getConfig(); + String uuid = translog.getTranslogUUID(); List locations = new ArrayList<>(); int translogOperations = randomIntBetween(10, 100); @@ -756,23 +758,23 @@ public class TranslogTests extends ESTestCase { String ascii = randomAlphaOfLengthBetween(1, 50); locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))); } - translog.sync(); + translog.close(); - corruptTranslogs(translogDir); + TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir, 0); + int corruptionsCaught = 0; - AtomicInteger corruptionsCaught = new AtomicInteger(0); - try (Translog.Snapshot snapshot = translog.newSnapshot()) { - for (Translog.Location location : locations) { - try { - Translog.Operation next = snapshot.next(); - assertNotNull(next); - } catch (TranslogCorruptedException e) { - corruptionsCaught.incrementAndGet(); + try (Translog translog = openTranslog(config, uuid)) { + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + for (Location loc : locations) { + snapshot.next(); } } - expectThrows(TranslogCorruptedException.class, snapshot::next); - assertThat("at least one corruption was caused and caught", corruptionsCaught.get(), greaterThanOrEqualTo(1)); + } catch (TranslogCorruptedException e) { + assertThat(e.getMessage(), containsString(translogDir.toString())); + corruptionsCaught++; } + + assertThat("corruption is caught", corruptionsCaught, greaterThanOrEqualTo(1)); } public void testTruncatedTranslogs() throws Exception { @@ -816,25 +818,6 @@ public class TranslogTests extends ESTestCase { } - /** - * Randomly overwrite some bytes in the translog files - */ - private void corruptTranslogs(Path directory) throws Exception { - Path[] files = FileSystemUtils.files(directory, "translog-*"); - for (Path file : files) { - logger.info("--> corrupting {}...", file); - FileChannel f = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE); - int corruptions = scaledRandomIntBetween(10, 50); - for (int i = 0; i < corruptions; i++) { - // note: with the current logic, this will sometimes be a no-op - long pos = randomIntBetween(0, (int) f.size()); - ByteBuffer junk = ByteBuffer.wrap(new byte[]{randomByte()}); - f.write(junk, pos); - } - f.close(); - } - } - private Term newUid(ParsedDocument doc) { return new Term("_id", Uid.encodeId(doc.id())); } @@ -1505,7 +1488,8 @@ public class TranslogTests extends ESTestCase { ops.add(test); } Translog.writeOperations(out, ops); - final List readOperations = 
Translog.readOperations(out.bytes().streamInput()); + final List readOperations = Translog.readOperations( + out.bytes().streamInput(), "testSnapshotFromStreamInput"); assertEquals(ops.size(), readOperations.size()); assertEquals(ops, readOperations); } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java b/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java index 029ed50fb28..cd4605b7e2d 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java @@ -153,9 +153,9 @@ public class TruncateTranslogIT extends ESIntegTestCase { // shut down the replica node to be tested later internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); - // Corrupt the translog file(s) + // Corrupt the translog file logger.info("--> corrupting translog"); - corruptRandomTranslogFiles("test"); + corruptRandomTranslogFile("test"); // Restart the single node logger.info("--> restarting node"); @@ -267,15 +267,16 @@ public class TruncateTranslogIT extends ESIntegTestCase { // sample the replica node translog dirs final ShardId shardId = new ShardId(resolveIndex("test"), 0); Set translogDirs = getTranslogDirs(replicaNode, shardId); + Path tdir = randomFrom(translogDirs); // stop the cluster nodes. we don't use full restart so the node start up order will be the same // and shard roles will be maintained internalCluster().stopRandomDataNode(); internalCluster().stopRandomDataNode(); - // Corrupt the translog file(s) + // Corrupt the translog file logger.info("--> corrupting translog"); - TestTranslog.corruptTranslogFiles(logger, random(), translogDirs); + TestTranslog.corruptRandomTranslogFile(logger, random(), tdir, TestTranslog.minTranslogGenUsedInRecovery(tdir)); // Restart the single node logger.info("--> starting node"); @@ -358,9 +359,10 @@ public class TruncateTranslogIT extends ESIntegTestCase { return translogDirs; } - private void corruptRandomTranslogFiles(String indexName) throws IOException { + private void corruptRandomTranslogFile(String indexName) throws IOException { Set translogDirs = getTranslogDirs(indexName); - TestTranslog.corruptTranslogFiles(logger, random(), translogDirs); + Path translogDir = randomFrom(translogDirs); + TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir, TestTranslog.minTranslogGenUsedInRecovery(translogDir)); } /** Disables translog flushing for the specified index */
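To make the change in PATCH 16 concrete, here is a minimal sketch (not part of the patch; the path and checksum values are hypothetical) of how the source now appears in the failure message, following the corruptedMessage() format added to TranslogCorruptedException above:

    // import assumed: org.elasticsearch.index.translog.TranslogCorruptedException
    TranslogCorruptedException e = new TranslogCorruptedException(
        "/var/data/nodes/0/indices/someIndexUuid/0/translog/translog-42.tlog",    // hypothetical translog file
        "checksum verification failed - expected: 0x1234abcd, got: 0xdeadbeef");
    // e.getMessage() now reads:
    // translog from source [/var/data/nodes/0/indices/someIndexUuid/0/translog/translog-42.tlog] is corrupted, checksum verification failed - expected: 0x1234abcd, got: 0xdeadbeef
    // which gives users the exact path to hand to the translog recovery tool mentioned in the commit message.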