From 08ecdfe20ecfa58631aca8ac0bc49470f9a8ea94 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 9 Apr 2019 07:21:51 +0100 Subject: [PATCH 01/60] Short-circuit rebalancing when disabled (#40966) Today if `cluster.routing.rebalance.enable: none` then rebalancing is disabled, but we still execute `balanceByWeights()` and perform some rather expensive calculations before discovering that we cannot rebalance any shards. In a large cluster this can make cluster state updates occur rather slowly. With this change we check earlier whether rebalancing is globally disabled and, if so, avoid the rebalancing process entirely. Relates #40942 which was reverted because of egregiously faulty tests. --- .../decider/EnableAllocationDecider.java | 30 ++- .../EnableAllocationShortCircuitTests.java | 233 ++++++++++++++++++ 2 files changed, 260 insertions(+), 3 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 8a72fe8cb49..c73a630bb66 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -86,16 +86,21 @@ public class EnableAllocationDecider extends AllocationDecider { clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); } - public void setEnableRebalance(Rebalance enableRebalance) { + private void setEnableRebalance(Rebalance enableRebalance) { this.enableRebalance = enableRebalance; } - public void setEnableAllocation(Allocation enableAllocation) { + private void setEnableAllocation(Allocation enableAllocation) { this.enableAllocation = enableAllocation; } @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return canAllocate(shardRouting, allocation); + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { if (allocation.ignoreDisable()) { return allocation.decision(Decision.YES, NAME, "explicitly ignoring any disabling of allocation due to manual allocation commands via the reroute API"); @@ -136,10 +141,29 @@ public class EnableAllocationDecider extends AllocationDecider { } } + @Override + public Decision canRebalance(RoutingAllocation allocation) { + if (allocation.ignoreDisable()) { + return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of rebalancing"); + } + + if (enableRebalance == Rebalance.NONE) { + for (IndexMetaData indexMetaData : allocation.metaData()) { + if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexMetaData.getSettings()) + && INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexMetaData.getSettings()) != Rebalance.NONE) { + return allocation.decision(Decision.YES, NAME, "rebalancing is permitted on one or more indices"); + } + } + return allocation.decision(Decision.NO, NAME, "no rebalancing is allowed due to %s", setting(enableRebalance, false)); + } + + return allocation.decision(Decision.YES, NAME, "rebalancing is not globally disabled"); + } + @Override public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { if (allocation.ignoreDisable()) { - 
return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of relocation"); + return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of rebalancing"); } Settings indexSettings = allocation.metaData().getIndexSafe(shardRouting.index()).getSettings(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java new file mode 100644 index 00000000000..9fcd3d97f1f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java @@ -0,0 +1,233 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.routing.allocation.decider; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.test.gateway.TestGatewayAllocator; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class EnableAllocationShortCircuitTests extends ESAllocationTestCase { + + private static ClusterState createClusterStateWithAllShardsAssigned() { + 
AllocationService allocationService = createAllocationService(Settings.EMPTY); + + final int numberOfNodes = randomIntBetween(1, 5); + final DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(); + for (int i = 0; i < numberOfNodes; i++) { + discoveryNodesBuilder.add(newNode("node" + i)); + } + + final MetaData.Builder metadataBuilder = MetaData.builder(); + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + for (int i = randomIntBetween(1, 10); i >= 0; i--) { + final IndexMetaData indexMetaData = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(randomIntBetween(0, numberOfNodes - 1)).build(); + metadataBuilder.put(indexMetaData, true); + routingTableBuilder.addAsNew(indexMetaData); + } + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(Settings.EMPTY)) + .nodes(discoveryNodesBuilder).metaData(metadataBuilder).routingTable(routingTableBuilder.build()).build(); + + while (clusterState.getRoutingNodes().hasUnassignedShards() + || clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).isEmpty() == false) { + clusterState = allocationService.applyStartedShards(clusterState, + clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING)); + clusterState = allocationService.reroute(clusterState, "reroute"); + } + + return clusterState; + } + + public void testRebalancingAttemptedIfPermitted() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), + randomFrom(EnableAllocationDecider.Rebalance.ALL, + EnableAllocationDecider.Rebalance.PRIMARIES, + EnableAllocationDecider.Rebalance.REPLICAS).name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, greaterThan(0)); + } + + public void testRebalancingSkippedIfDisabled() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE.name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, equalTo(0)); + } + + public void testRebalancingSkippedIfDisabledIncludingOnSpecificIndices() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + final IndexMetaData indexMetaData = randomFrom(clusterState.metaData().indices().values().toArray(IndexMetaData.class)); + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) + .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder().put(indexMetaData.getSettings()) + .put(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()))).build()).build(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()), + plugin); + allocationService.reroute(clusterState, 
"reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, equalTo(0)); + } + + public void testRebalancingAttemptedIfDisabledButOverridenOnSpecificIndices() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + final IndexMetaData indexMetaData = randomFrom(clusterState.metaData().indices().values().toArray(IndexMetaData.class)); + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) + .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder().put(indexMetaData.getSettings()) + .put(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), + randomFrom(EnableAllocationDecider.Rebalance.ALL, + EnableAllocationDecider.Rebalance.PRIMARIES, + EnableAllocationDecider.Rebalance.REPLICAS).name()))).build()).build(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, greaterThan(0)); + } + + public void testAllocationSkippedIfDisabled() { + final AllocateShortCircuitPlugin plugin = new AllocateShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE.name()), + plugin); + + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")) + .build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.canAllocateAttempts, equalTo(0)); + } + + private static AllocationService createAllocationService(Settings.Builder settings, ClusterPlugin plugin) { + final ClusterSettings emptyClusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + List deciders = new ArrayList<>(ClusterModule.createAllocationDeciders(settings.build(), emptyClusterSettings, + Collections.singletonList(plugin))); + return new MockAllocationService( + new AllocationDeciders(deciders), + new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); + } + + private static class RebalanceShortCircuitPlugin implements ClusterPlugin { + int rebalanceAttempts; + + @Override + public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { + return Collections.singletonList(new RebalanceShortCircuitAllocationDecider()); + } + + private class RebalanceShortCircuitAllocationDecider extends AllocationDecider { + + @Override + public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { + rebalanceAttempts++; + return super.canRebalance(shardRouting, allocation); + } + + @Override + public Decision canRebalance(RoutingAllocation allocation) { + rebalanceAttempts++; + return super.canRebalance(allocation); + } + } + } + + private static class AllocateShortCircuitPlugin implements ClusterPlugin { + 
int canAllocateAttempts; + + @Override + public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { + return Collections.singletonList(new AllocateShortCircuitAllocationDecider()); + } + + private class AllocateShortCircuitAllocationDecider extends AllocationDecider { + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(shardRouting, node, allocation); + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(shardRouting, allocation); + } + + @Override + public Decision canAllocate(IndexMetaData indexMetaData, RoutingNode node, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(indexMetaData, node, allocation); + } + + @Override + public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(node, allocation); + } + } + } +} From 40638d7b28dc1714af737ac603f667cab77be46e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 9 Apr 2019 13:46:33 +0200 Subject: [PATCH 02/60] [Docs] Delete explanation for completion suggester default analyzer choice (#36720) The explanation given in the completion suggester documentation why we use the "simple" analyzer as the default is no longer valid. Since we still use "simple" as the default, we should just delete the explanation that doesn't fit anymore. Closes #36715 --- docs/reference/search/suggesters/completion-suggest.asciidoc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index c89dce3d241..0ed0601b086 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -48,10 +48,6 @@ Mapping supports the following parameters: [horizontal] `analyzer`:: The index analyzer to use, defaults to `simple`. - In case you are wondering why we did not opt for the `standard` - analyzer: We try to have easy to understand behaviour here, and if you - index the field content `At the Drive-in`, you will not get any - suggestions for `a`, nor for `d` (the first non stopword). `search_analyzer`:: The search analyzer to use, defaults to value of `analyzer`. From c5a77e5d8cbeed241e5bbc403f0b3680d4e22173 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Tue, 9 Apr 2019 15:02:03 +0200 Subject: [PATCH 03/60] Node repurpose tool docs (#40525) Added documentation for node repurpose tool and included documentation on how to repurpose nodes safely. Adjusted order of tools in `elasticsearch-node` tool since the repurpose tool is most likely to be used. 
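As a sketch only (an editorial illustration, not part of this commit; the service commands and role settings shown are assumptions), the safe repurposing flow this documentation describes looks like:

    # drain or back up data as described in the docs, then:
    systemctl stop elasticsearch.service          # stop the node
    # edit elasticsearch.yml: node.master: true, node.data: false
    bin/elasticsearch-node repurpose              # delete excess on-disk data
    systemctl start elasticsearch.service         # restart with the new roles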
Co-Authored-By: David Turner
---
 docs/reference/commands/node-tool.asciidoc | 127 +++++++++++++++---
 docs/reference/modules/node.asciidoc       |  43 ++++++
 .../cluster/coordination/NodeToolCli.java  |   2 +-
 3 files changed, 153 insertions(+), 19 deletions(-)

diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc
index d7ed054f475..f070d11aa8f 100644
--- a/docs/reference/commands/node-tool.asciidoc
+++ b/docs/reference/commands/node-tool.asciidoc
@@ -1,15 +1,17 @@
 [[node-tool]]
 == elasticsearch-node
 
-The `elasticsearch-node` command enables you to perform unsafe operations that
-risk data loss but which may help to recover some data in a disaster.
+The `elasticsearch-node` command enables you to perform certain unsafe
+operations on a node that are only possible while it is shut down. This command
+allows you to adjust the <<modules-node,role>> of a node and may be able to
+recover some data after a disaster.
 
 [float]
 === Synopsis
 
 [source,shell]
 --------------------------------------------------
-bin/elasticsearch-node unsafe-bootstrap|detach-cluster
+bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster
   [--ordinal <Integer>] [-E <KeyValuePair>]
   [-h, --help] ([-s, --silent] | [-v, --verbose])
 --------------------------------------------------
@@ -17,6 +19,60 @@ bin/elasticsearch-node unsafe-bootstrap|detach-cluster
 [float]
 === Description
 
+This tool has three modes:
+
+* `elasticsearch-node repurpose` can be used to delete unwanted data from a
+  node if it used to be a <<data-node,data node>> or a
+  <<master-node,master-eligible node>> but has been repurposed not to have one
+  or other of these roles.
+
+* `elasticsearch-node unsafe-bootstrap` can be used to perform _unsafe cluster
+  bootstrapping_. It forces one of the nodes to form a brand-new cluster on
+  its own, using its local copy of the cluster metadata.
+
+* `elasticsearch-node detach-cluster` enables you to move nodes from one
+  cluster to another. This can be used to move nodes into a new cluster
+  created with the `elasticsearch-node unsafe-bootstrap` command. If unsafe
+  cluster bootstrapping was not possible, it also enables you to move nodes
+  into a brand-new cluster.
+
+[[node-tool-repurpose]]
+[float]
+==== Changing the role of a node
+
+There may be situations where you want to repurpose a node without following
+the <<change-node-role,proper repurposing processes>>. The `elasticsearch-node
+repurpose` tool allows you to delete any excess on-disk data and start a node
+after repurposing it.
+
+The intended use is:
+
+* Stop the node
+* Update `elasticsearch.yml` by setting `node.master` and `node.data` as
+  desired.
+* Run `elasticsearch-node repurpose` on the node
+* Start the node
+
+If you run `elasticsearch-node repurpose` on a node with `node.data: false` and
+`node.master: true` then it will delete any remaining shard data on that node,
+but it will leave the index and cluster metadata alone. If you run
+`elasticsearch-node repurpose` on a node with `node.data: false` and
+`node.master: false` then it will delete any remaining shard data and index
+metadata, but it will leave the cluster metadata alone.
+
+[WARNING]
+Running this command can lead to data loss for the indices mentioned if the
+data contained is not available on other nodes in the cluster. Only run this
+tool if you understand and accept the possible consequences, and only after
+determining that the node cannot be repurposed cleanly.
+
+The tool provides a summary of the data to be deleted and asks for confirmation
+before making any changes.
You can get detailed information about the affected +indices and shards by passing the verbose (`-v`) option. + +[float] +==== Recovering data after a disaster + Sometimes {es} nodes are temporarily stopped, perhaps because of the need to perform some maintenance activity or perhaps because of a hardware failure. After you resolve the temporary condition and restart the node, @@ -53,22 +109,10 @@ way forward that does not risk data loss, but it may be possible to use the `elasticsearch-node` tool to construct a new cluster that contains some of the data from the failed cluster. -This tool has two modes: - -* `elastisearch-node unsafe-bootstap` can be used if there is at least one - remaining master-eligible node. It forces one of the remaining nodes to form - a brand-new cluster on its own, using its local copy of the cluster metadata. - This is known as _unsafe cluster bootstrapping_. - -* `elastisearch-node detach-cluster` enables you to move nodes from one cluster - to another. This can be used to move nodes into the new cluster created with - the `elastisearch-node unsafe-bootstap` command. If unsafe cluster bootstrapping was not - possible, it also enables you to - move nodes into a brand-new cluster. [[node-tool-unsafe-bootstrap]] [float] -==== Unsafe cluster bootstrapping +===== Unsafe cluster bootstrapping If there is at least one remaining master-eligible node, but it is not possible to restart a majority of them, then the `elasticsearch-node unsafe-bootstrap` @@ -143,7 +187,7 @@ job. [[node-tool-detach-cluster]] [float] -==== Detaching nodes from their cluster +===== Detaching nodes from their cluster It is unsafe for nodes to move between clusters, because different clusters have completely different cluster metadata. There is no way to safely merge the @@ -206,9 +250,12 @@ The message `Node was successfully detached from the cluster` does not mean that there has been no data loss, it just means that tool was able to complete its job. + [float] === Parameters +`repurpose`:: Delete excess data when a node's roles are changed. + `unsafe-bootstrap`:: Specifies to unsafely bootstrap this node as a new one-node cluster. @@ -230,6 +277,51 @@ to `0`, meaning to use the first node in the data path. [float] === Examples +[float] +==== Repurposing a node as a dedicated master node (master: true, data: false) + +In this example, a former data node is repurposed as a dedicated master node. +First update the node's settings to `node.master: true` and `node.data: false` +in its `elasticsearch.yml` config file. Then run the `elasticsearch-node +repurpose` command to find and remove excess shard data: + +[source,txt] +---- +node$ ./bin/elasticsearch-node repurpose + + WARNING: Elasticsearch MUST be stopped before running this tool. + +Found 2 shards in 2 indices to clean up +Use -v to see list of paths and indices affected +Node is being re-purposed as master and no-data. Clean-up of shard data will be performed. +Do you want to proceed? +Confirm [y/N] y +Node successfully repurposed to master and no-data. +---- + +[float] +==== Repurposing a node as a coordinating-only node (master: false, data: false) + +In this example, a node that previously held data is repurposed as a +coordinating-only node. First update the node's settings to `node.master: +false` and `node.data: false` in its `elasticsearch.yml` config file. 
Then run
+the `elasticsearch-node repurpose` command to find and remove excess shard data
+and index metadata:
+
+[source,txt]
+----
+node$ ./bin/elasticsearch-node repurpose
+
+    WARNING: Elasticsearch MUST be stopped before running this tool.
+
+Found 2 indices (2 shards and 2 index meta data) to clean up
+Use -v to see list of paths and indices affected
+Node is being re-purposed as no-master and no-data. Clean-up of index data will be performed.
+Do you want to proceed?
+Confirm [y/N] y
+Node successfully repurposed to no-master and no-data.
+----
+
 [float]
 ==== Unsafe cluster bootstrapping
 
@@ -331,4 +423,3 @@ Do you want to proceed?
 Confirm [y/N] y
 Node was successfully detached from the cluster
 ----
-
diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc
index e9f171cce8b..f988e97ef55 100644
--- a/docs/reference/modules/node.asciidoc
+++ b/docs/reference/modules/node.asciidoc
@@ -204,6 +204,49 @@ NOTE: These settings apply only when {xpack} is not installed. To create a
 dedicated coordinating node when {xpack} is installed, see <>.
 endif::include-xpack[]
 
+[float]
+[[change-node-role]]
+=== Changing the role of a node
+
+Each data node maintains the following data on disk:
+
+* the shard data for every shard allocated to that node,
+* the index metadata corresponding with every shard allocated to that node, and
+* the cluster-wide metadata, such as settings and index templates.
+
+Similarly, each master-eligible node maintains the following data on disk:
+
+* the index metadata for every index in the cluster, and
+* the cluster-wide metadata, such as settings and index templates.
+
+Each node checks the contents of its data path at startup. If it discovers
+unexpected data then it will refuse to start. This is to avoid importing
+unwanted <<modules-gateway-dangling-indices,dangling indices>> which can lead
+to a red cluster health. To be more precise, nodes with `node.data: false` will
+refuse to start if they find any shard data on disk at startup, and nodes with
+both `node.master: false` and `node.data: false` will refuse to start if they
+have any index metadata on disk at startup.
+
+It is possible to change the roles of a node by adjusting its
+`elasticsearch.yml` file and restarting it. This is known as _repurposing_ a
+node. In order to satisfy the checks for unexpected data described above, you
+must perform some extra steps to prepare a node for repurposing when setting
+its `node.data` or `node.master` roles to `false`:
+
+* If you want to repurpose a data node by changing `node.data` to `false` then
+  you should first use an <<allocation-filtering,allocation filter>> to safely
+  migrate all the shard data onto other nodes in the cluster.
+
+* If you want to repurpose a node to have both `node.master: false` and
+  `node.data: false` then it is simplest to start a brand-new node with an
+  empty data path and the desired roles. You may find it safest to use an
+  <<allocation-filtering,allocation filter>> to migrate the shard data
+  elsewhere in the cluster first.
+
+If it is not possible to follow these extra steps then you may be able to use
+the <<node-tool-repurpose,elasticsearch-node repurpose>> tool to delete any
+excess data that prevents a node from starting.
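The allocation-filter step mentioned in the list above can be sketched as follows (an illustration, not part of this patch; the node name `node-1` is an assumption). Once no shards remain on the node, it can be stopped, reconfigured, and repurposed:

[source,js]
--------------------------------------------------
PUT _cluster/settings
{
  "transient": {
    "cluster.routing.allocation.exclude._name": "node-1"
  }
}
--------------------------------------------------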
+ [float] == Node data path settings diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java index e9f42062a7a..d6bd22bcd76 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -36,9 +36,9 @@ public class NodeToolCli extends MultiCommand { super("A CLI tool to do unsafe cluster and index manipulations on current node", ()->{}); CommandLoggingConfigurator.configureLoggingWithoutConfig(); + subcommands.put("repurpose", new NodeRepurposeCommand()); subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand()); subcommands.put("detach-cluster", new DetachClusterCommand()); - subcommands.put("repurpose", new NodeRepurposeCommand()); } public static void main(String[] args) throws Exception { From 040a4961c783318803c8dc95970ba53484cdc8bb Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 9 Apr 2019 09:46:05 +0200 Subject: [PATCH 04/60] Revert "Revert "Change HLRC CCR response tests to use AbstractResponseTestCase base class. (#40257)"" (#40971) This reverts commit df91237a94fd3d3ae954eb1845c434dda692d087. --- client/rest-high-level/build.gradle | 3 + .../client/ccr/CcrStatsResponseTests.java | 411 ++++++------------ .../client/ccr/FollowInfoResponseTests.java | 118 +++-- .../client/ccr/FollowStatsResponseTests.java | 299 ++++--------- .../GetAutoFollowPatternResponseTests.java | 128 +++--- .../client/ccr/PutFollowResponseTests.java | 37 +- 6 files changed, 374 insertions(+), 622 deletions(-) diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 44262f09346..420bd6d7414 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -64,6 +64,9 @@ dependencies { testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" //this is needed to make RestHighLevelClientTests#testApiNamingConventions work from IDEs testCompile "org.elasticsearch:rest-api-spec:${version}" + // Needed for serialization tests: + // (In order to serialize a server side class to a client side class or the other way around) + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" restSpec "org.elasticsearch:rest-api-spec:${version}" } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java index cb8072f6baf..d56b762520c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java @@ -20,50 +20,110 @@ package org.elasticsearch.client.ccr; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.client.ccr.AutoFollowStats.AutoFollowedCluster; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import 
org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus;
+import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction;
+import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
 
-import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 
-public class CcrStatsResponseTests extends ESTestCase {
+public class CcrStatsResponseTests extends AbstractResponseTestCase<CcrStatsAction.Response, CcrStatsResponse> {
 
-    public void testFromXContent() throws IOException {
-        xContentTester(this::createParser,
-            CcrStatsResponseTests::createTestInstance,
-            CcrStatsResponseTests::toXContent,
-            CcrStatsResponse::fromXContent)
-            .supportsUnknownFields(true)
-            .assertEqualsConsumer(CcrStatsResponseTests::assertEqualInstances)
-            .assertToXContentEquivalence(false)
-            .test();
+    @Override
+    protected CcrStatsAction.Response createServerTestInstance() {
+        org.elasticsearch.xpack.core.ccr.AutoFollowStats autoFollowStats = new org.elasticsearch.xpack.core.ccr.AutoFollowStats(
+            randomNonNegativeLong(),
+            randomNonNegativeLong(),
+            randomNonNegativeLong(),
+            randomReadExceptions(),
+            randomTrackingClusters()
+        );
+        FollowStatsAction.StatsResponses statsResponse = createStatsResponse();
+        return new CcrStatsAction.Response(autoFollowStats, statsResponse);
     }
 
-    // Needed, because exceptions in IndicesFollowStats and AutoFollowStats cannot be compared
-    private static void assertEqualInstances(CcrStatsResponse expectedInstance, CcrStatsResponse newInstance) {
-        assertNotSame(expectedInstance, newInstance);
+    static NavigableMap<String, Tuple<Long, ElasticsearchException>> randomReadExceptions() {
+        final int count = randomIntBetween(0, 16);
+        final NavigableMap<String, Tuple<Long, ElasticsearchException>> readExceptions = new TreeMap<>();
+        for (int i = 0; i < count; i++) {
+            readExceptions.put("" + i, Tuple.tuple(randomNonNegativeLong(),
+                new ElasticsearchException(new IllegalStateException("index [" + i + "]"))));
+        }
+        return readExceptions;
+    }
+
+    static NavigableMap<String, org.elasticsearch.xpack.core.ccr.AutoFollowStats.AutoFollowedCluster> randomTrackingClusters() {
+        final int count = randomIntBetween(0, 16);
+        final NavigableMap<String, org.elasticsearch.xpack.core.ccr.AutoFollowStats.AutoFollowedCluster> readExceptions = new TreeMap<>();
+        for (int i = 0; i < count; i++) {
+            readExceptions.put("" + i,
+                new org.elasticsearch.xpack.core.ccr.AutoFollowStats.AutoFollowedCluster(randomLong(), randomNonNegativeLong()));
+        }
+        return readExceptions;
+    }
+
+    static FollowStatsAction.StatsResponses createStatsResponse() {
+        int numResponses = randomIntBetween(0, 8);
+        List<FollowStatsAction.StatsResponse> responses = new ArrayList<>(numResponses);
+        for (int i = 0; i < numResponses; i++) {
+            ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus(
+                randomAlphaOfLength(4),
+                randomAlphaOfLength(4),
+                randomAlphaOfLength(4),
+                randomInt(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomIntBetween(0, Integer.MAX_VALUE),
+                randomIntBetween(0, Integer.MAX_VALUE),
+                randomIntBetween(0, Integer.MAX_VALUE),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+
randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + Collections.emptyNavigableMap(), + randomLong(), + randomBoolean() ? new ElasticsearchException("fatal error") : null); + responses.add(new FollowStatsAction.StatsResponse(status)); + } + return new FollowStatsAction.StatsResponses(Collections.emptyList(), Collections.emptyList(), responses); + } + + @Override + protected CcrStatsResponse doParseToClientInstance(XContentParser parser) throws IOException { + return CcrStatsResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(CcrStatsAction.Response serverTestInstance, CcrStatsResponse clientInstance) { { - AutoFollowStats newAutoFollowStats = newInstance.getAutoFollowStats(); - AutoFollowStats expectedAutoFollowStats = expectedInstance.getAutoFollowStats(); + AutoFollowStats newAutoFollowStats = clientInstance.getAutoFollowStats(); + org.elasticsearch.xpack.core.ccr.AutoFollowStats expectedAutoFollowStats = serverTestInstance.getAutoFollowStats(); assertThat(newAutoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(expectedAutoFollowStats.getNumberOfSuccessfulFollowIndices())); assertThat(newAutoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), @@ -89,62 +149,69 @@ public class CcrStatsResponseTests extends ESTestCase { } } { - IndicesFollowStats newIndicesFollowStats = newInstance.getIndicesFollowStats(); - IndicesFollowStats expectedIndicesFollowStats = expectedInstance.getIndicesFollowStats(); + IndicesFollowStats newIndicesFollowStats = clientInstance.getIndicesFollowStats(); + + // sort by index name, then shard ID + final Map> expectedIndicesFollowStats = new TreeMap<>(); + for (final FollowStatsAction.StatsResponse statsResponse : serverTestInstance.getFollowStats().getStatsResponses()) { + expectedIndicesFollowStats.computeIfAbsent( + statsResponse.status().followerIndex(), + k -> new TreeMap<>()).put(statsResponse.status().getShardId(), statsResponse); + } assertThat(newIndicesFollowStats.getShardFollowStats().size(), - equalTo(expectedIndicesFollowStats.getShardFollowStats().size())); + equalTo(expectedIndicesFollowStats.size())); assertThat(newIndicesFollowStats.getShardFollowStats().keySet(), - equalTo(expectedIndicesFollowStats.getShardFollowStats().keySet())); + equalTo(expectedIndicesFollowStats.keySet())); for (Map.Entry> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) { List newStats = indexEntry.getValue(); - List expectedStats = expectedIndicesFollowStats.getShardFollowStats(indexEntry.getKey()); + Map expectedStats = expectedIndicesFollowStats.get(indexEntry.getKey()); assertThat(newStats.size(), equalTo(expectedStats.size())); for (int i = 0; i < newStats.size(); i++) { ShardFollowStats actualShardFollowStats = newStats.get(i); - ShardFollowStats expectedShardFollowStats = expectedStats.get(i); + ShardFollowNodeTaskStatus expectedShardFollowStats = expectedStats.get(actualShardFollowStats.getShardId()).status(); assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster())); - assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.getLeaderIndex())); - assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.getFollowerIndex())); + assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.leaderIndex())); + assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.followerIndex())); 
assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId())); assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(), - equalTo(expectedShardFollowStats.getLeaderGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.getLeaderMaxSeqNo())); + equalTo(expectedShardFollowStats.leaderGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.leaderMaxSeqNo())); assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(), - equalTo(expectedShardFollowStats.getFollowerGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.getLastRequestedSeqNo())); + equalTo(expectedShardFollowStats.followerGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.lastRequestedSeqNo())); assertThat(actualShardFollowStats.getOutstandingReadRequests(), - equalTo(expectedShardFollowStats.getOutstandingReadRequests())); + equalTo(expectedShardFollowStats.outstandingReadRequests())); assertThat(actualShardFollowStats.getOutstandingWriteRequests(), - equalTo(expectedShardFollowStats.getOutstandingWriteRequests())); + equalTo(expectedShardFollowStats.outstandingWriteRequests())); assertThat(actualShardFollowStats.getWriteBufferOperationCount(), - equalTo(expectedShardFollowStats.getWriteBufferOperationCount())); + equalTo(expectedShardFollowStats.writeBufferOperationCount())); assertThat(actualShardFollowStats.getFollowerMappingVersion(), - equalTo(expectedShardFollowStats.getFollowerMappingVersion())); + equalTo(expectedShardFollowStats.followerMappingVersion())); assertThat(actualShardFollowStats.getFollowerSettingsVersion(), - equalTo(expectedShardFollowStats.getFollowerSettingsVersion())); + equalTo(expectedShardFollowStats.followerSettingsVersion())); assertThat(actualShardFollowStats.getTotalReadTimeMillis(), - equalTo(expectedShardFollowStats.getTotalReadTimeMillis())); + equalTo(expectedShardFollowStats.totalReadTimeMillis())); assertThat(actualShardFollowStats.getSuccessfulReadRequests(), - equalTo(expectedShardFollowStats.getSuccessfulReadRequests())); - assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.getFailedReadRequests())); - assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.getOperationsReads())); - assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.getBytesRead())); + equalTo(expectedShardFollowStats.successfulReadRequests())); + assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.failedReadRequests())); + assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.operationsReads())); + assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.bytesRead())); assertThat(actualShardFollowStats.getTotalWriteTimeMillis(), - equalTo(expectedShardFollowStats.getTotalWriteTimeMillis())); + equalTo(expectedShardFollowStats.totalWriteTimeMillis())); assertThat(actualShardFollowStats.getSuccessfulWriteRequests(), - equalTo(expectedShardFollowStats.getSuccessfulWriteRequests())); + equalTo(expectedShardFollowStats.successfulWriteRequests())); assertThat(actualShardFollowStats.getFailedWriteRequests(), - equalTo(expectedShardFollowStats.getFailedWriteRequests())); - assertThat(actualShardFollowStats.getOperationWritten(), 
equalTo(expectedShardFollowStats.getOperationWritten())); + equalTo(expectedShardFollowStats.failedWriteRequests())); + assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.operationWritten())); assertThat(actualShardFollowStats.getReadExceptions().size(), - equalTo(expectedShardFollowStats.getReadExceptions().size())); + equalTo(expectedShardFollowStats.readExceptions().size())); assertThat(actualShardFollowStats.getReadExceptions().keySet(), - equalTo(expectedShardFollowStats.getReadExceptions().keySet())); + equalTo(expectedShardFollowStats.readExceptions().keySet())); for (final Map.Entry> entry : actualShardFollowStats.getReadExceptions().entrySet()) { final Tuple expectedTuple = - expectedShardFollowStats.getReadExceptions().get(entry.getKey()); + expectedShardFollowStats.readExceptions().get(entry.getKey()); assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1())); // x-content loses the exception final ElasticsearchException expected = expectedTuple.v2(); @@ -156,246 +223,10 @@ public class CcrStatsResponseTests extends ESTestCase { assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage())); } assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(), - equalTo(expectedShardFollowStats.getTimeSinceLastReadMillis())); + equalTo(expectedShardFollowStats.timeSinceLastReadMillis())); } } } } - private static void toXContent(CcrStatsResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - { - AutoFollowStats autoFollowStats = response.getAutoFollowStats(); - builder.startObject(CcrStatsResponse.AUTO_FOLLOW_STATS_FIELD.getPreferredName()); - { - builder.field(AutoFollowStats.NUMBER_OF_SUCCESSFUL_INDICES_AUTO_FOLLOWED.getPreferredName(), - autoFollowStats.getNumberOfSuccessfulFollowIndices()); - builder.field(AutoFollowStats.NUMBER_OF_FAILED_REMOTE_CLUSTER_STATE_REQUESTS.getPreferredName(), - autoFollowStats.getNumberOfFailedRemoteClusterStateRequests()); - builder.field(AutoFollowStats.NUMBER_OF_FAILED_INDICES_AUTO_FOLLOWED.getPreferredName(), - autoFollowStats.getNumberOfFailedFollowIndices()); - builder.startArray(AutoFollowStats.RECENT_AUTO_FOLLOW_ERRORS.getPreferredName()); - for (Map.Entry> entry : - autoFollowStats.getRecentAutoFollowErrors().entrySet()) { - builder.startObject(); - { - builder.field(AutoFollowStats.LEADER_INDEX.getPreferredName(), entry.getKey()); - builder.field(AutoFollowStats.TIMESTAMP.getPreferredName(), entry.getValue().v1()); - builder.field(AutoFollowStats.AUTO_FOLLOW_EXCEPTION.getPreferredName()); - builder.startObject(); - { - ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, entry.getValue().v2()); - } - builder.endObject(); - } - builder.endObject(); - } - builder.endArray(); - builder.startArray(AutoFollowStats.AUTO_FOLLOWED_CLUSTERS.getPreferredName()); - for (Map.Entry entry : autoFollowStats.getAutoFollowedClusters().entrySet()) { - builder.startObject(); - { - builder.field(AutoFollowStats.CLUSTER_NAME.getPreferredName(), entry.getKey()); - builder.field(AutoFollowStats.TIME_SINCE_LAST_CHECK_MILLIS.getPreferredName(), - entry.getValue().getTimeSinceLastCheckMillis()); - builder.field(AutoFollowStats.LAST_SEEN_METADATA_VERSION.getPreferredName(), - entry.getValue().getLastSeenMetadataVersion()); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); - - IndicesFollowStats indicesFollowStats = response.getIndicesFollowStats(); - 
builder.startObject(CcrStatsResponse.FOLLOW_STATS_FIELD.getPreferredName()); - { - builder.startArray(IndicesFollowStats.INDICES_FIELD.getPreferredName()); - for (Map.Entry> indexEntry : - indicesFollowStats.getShardFollowStats().entrySet()) { - builder.startObject(); - { - builder.field(IndicesFollowStats.INDEX_FIELD.getPreferredName(), indexEntry.getKey()); - builder.startArray(IndicesFollowStats.SHARDS_FIELD.getPreferredName()); - { - for (ShardFollowStats stats : indexEntry.getValue()) { - builder.startObject(); - { - builder.field(ShardFollowStats.LEADER_CLUSTER.getPreferredName(), stats.getRemoteCluster()); - builder.field(ShardFollowStats.LEADER_INDEX.getPreferredName(), stats.getLeaderIndex()); - builder.field(ShardFollowStats.FOLLOWER_INDEX.getPreferredName(), stats.getFollowerIndex()); - builder.field(ShardFollowStats.SHARD_ID.getPreferredName(), stats.getShardId()); - builder.field(ShardFollowStats.LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), - stats.getLeaderGlobalCheckpoint()); - builder.field(ShardFollowStats.LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), stats.getLeaderMaxSeqNo()); - builder.field(ShardFollowStats.FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), - stats.getFollowerGlobalCheckpoint()); - builder.field(ShardFollowStats.FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), - stats.getFollowerMaxSeqNo()); - builder.field(ShardFollowStats.LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), - stats.getLastRequestedSeqNo()); - builder.field(ShardFollowStats.OUTSTANDING_READ_REQUESTS.getPreferredName(), - stats.getOutstandingReadRequests()); - builder.field(ShardFollowStats.OUTSTANDING_WRITE_REQUESTS.getPreferredName(), - stats.getOutstandingWriteRequests()); - builder.field(ShardFollowStats.WRITE_BUFFER_OPERATION_COUNT_FIELD.getPreferredName(), - stats.getWriteBufferOperationCount()); - builder.humanReadableField( - ShardFollowStats.WRITE_BUFFER_SIZE_IN_BYTES_FIELD.getPreferredName(), - "write_buffer_size", - new ByteSizeValue(stats.getWriteBufferSizeInBytes())); - builder.field(ShardFollowStats.FOLLOWER_MAPPING_VERSION_FIELD.getPreferredName(), - stats.getFollowerMappingVersion()); - builder.field(ShardFollowStats.FOLLOWER_SETTINGS_VERSION_FIELD.getPreferredName(), - stats.getFollowerSettingsVersion()); - builder.humanReadableField( - ShardFollowStats.TOTAL_READ_TIME_MILLIS_FIELD.getPreferredName(), - "total_read_time", - new TimeValue(stats.getTotalReadTimeMillis(), TimeUnit.MILLISECONDS)); - builder.humanReadableField( - ShardFollowStats.TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD.getPreferredName(), - "total_read_remote_exec_time", - new TimeValue(stats.getTotalReadRemoteExecTimeMillis(), TimeUnit.MILLISECONDS)); - builder.field(ShardFollowStats.SUCCESSFUL_READ_REQUESTS_FIELD.getPreferredName(), - stats.getSuccessfulReadRequests()); - builder.field(ShardFollowStats.FAILED_READ_REQUESTS_FIELD.getPreferredName(), - stats.getFailedReadRequests()); - builder.field(ShardFollowStats.OPERATIONS_READ_FIELD.getPreferredName(), stats.getOperationsReads()); - builder.humanReadableField( - ShardFollowStats.BYTES_READ.getPreferredName(), - "total_read", - new ByteSizeValue(stats.getBytesRead(), ByteSizeUnit.BYTES)); - builder.humanReadableField( - ShardFollowStats.TOTAL_WRITE_TIME_MILLIS_FIELD.getPreferredName(), - "total_write_time", - new TimeValue(stats.getTotalWriteTimeMillis(), TimeUnit.MILLISECONDS)); - builder.field(ShardFollowStats.SUCCESSFUL_WRITE_REQUESTS_FIELD.getPreferredName(), - stats.getSuccessfulWriteRequests()); - 
builder.field(ShardFollowStats.FAILED_WRITE_REQUEST_FIELD.getPreferredName(), - stats.getFailedWriteRequests()); - builder.field(ShardFollowStats.OPERATIONS_WRITTEN.getPreferredName(), stats.getOperationWritten()); - builder.startArray(ShardFollowStats.READ_EXCEPTIONS.getPreferredName()); - { - for (final Map.Entry> entry : - stats.getReadExceptions().entrySet()) { - builder.startObject(); - { - builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), - entry.getKey()); - builder.field(ShardFollowStats.READ_EXCEPTIONS_RETRIES.getPreferredName(), - entry.getValue().v1()); - builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); - builder.startObject(); - { - ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, - entry.getValue().v2()); - } - builder.endObject(); - } - builder.endObject(); - } - } - builder.endArray(); - builder.humanReadableField( - ShardFollowStats.TIME_SINCE_LAST_READ_MILLIS_FIELD.getPreferredName(), - "time_since_last_read", - new TimeValue(stats.getTimeSinceLastReadMillis(), TimeUnit.MILLISECONDS)); - if (stats.getFatalException() != null) { - builder.field(ShardFollowStats.FATAL_EXCEPTION.getPreferredName()); - builder.startObject(); - { - ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, - stats.getFatalException()); - } - builder.endObject(); - } - } - builder.endObject(); - } - } - builder.endArray(); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); - } - builder.endObject(); - } - - private static CcrStatsResponse createTestInstance() { - return new CcrStatsResponse(randomAutoFollowStats(), randomIndicesFollowStats()); - } - - private static AutoFollowStats randomAutoFollowStats() { - final int count = randomIntBetween(0, 16); - final NavigableMap> readExceptions = new TreeMap<>(); - for (int i = 0; i < count; i++) { - readExceptions.put("" + i, Tuple.tuple(randomNonNegativeLong(), - new ElasticsearchException(new IllegalStateException("index [" + i + "]")))); - } - final NavigableMap autoFollowClusters = new TreeMap<>(); - for (int i = 0; i < count; i++) { - autoFollowClusters.put("" + i, new AutoFollowedCluster(randomLong(), randomNonNegativeLong())); - } - return new AutoFollowStats( - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - readExceptions, - autoFollowClusters - ); - } - - static IndicesFollowStats randomIndicesFollowStats() { - int numIndices = randomIntBetween(0, 16); - NavigableMap> shardFollowStats = new TreeMap<>(); - for (int i = 0; i < numIndices; i++) { - String index = randomAlphaOfLength(4); - int numShards = randomIntBetween(0, 5); - List stats = new ArrayList<>(numShards); - shardFollowStats.put(index, stats); - for (int j = 0; j < numShards; j++) { - final int count = randomIntBetween(0, 16); - final NavigableMap> readExceptions = new TreeMap<>(); - for (long k = 0; k < count; k++) { - readExceptions.put(k, new Tuple<>(randomIntBetween(0, Integer.MAX_VALUE), - new ElasticsearchException(new IllegalStateException("index [" + k + "]")))); - } - - stats.add(new ShardFollowStats( - randomAlphaOfLength(4), - randomAlphaOfLength(4), - randomAlphaOfLength(4), - randomInt(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomIntBetween(0, Integer.MAX_VALUE), - randomIntBetween(0, Integer.MAX_VALUE), - randomIntBetween(0, Integer.MAX_VALUE), - randomNonNegativeLong(), - 
randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomLong(), - readExceptions, - randomBoolean() ? new ElasticsearchException("fatal error") : null)); - } - } - return new IndicesFollowStats(shardFollowStats); - } - } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java index 5cd327495dc..2c5bfba5025 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java @@ -19,59 +19,89 @@ package org.elasticsearch.client.ccr; -import org.elasticsearch.client.ccr.FollowInfoResponse.FollowerInfo; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction; +import org.elasticsearch.xpack.core.ccr.action.FollowParameters; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; -public class FollowInfoResponseTests extends ESTestCase { +public class FollowInfoResponseTests extends AbstractResponseTestCase { - public void testFromXContent() throws IOException { - xContentTester(this::createParser, - FollowInfoResponseTests::createTestInstance, - FollowInfoResponseTests::toXContent, - FollowInfoResponse::fromXContent) - .supportsUnknownFields(true) - .test(); - } - - private static void toXContent(FollowInfoResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - builder.startArray(FollowInfoResponse.FOLLOWER_INDICES_FIELD.getPreferredName()); - for (FollowerInfo info : response.getInfos()) { - builder.startObject(); - builder.field(FollowerInfo.FOLLOWER_INDEX_FIELD.getPreferredName(), info.getFollowerIndex()); - builder.field(FollowerInfo.REMOTE_CLUSTER_FIELD.getPreferredName(), info.getRemoteCluster()); - builder.field(FollowerInfo.LEADER_INDEX_FIELD.getPreferredName(), info.getLeaderIndex()); - builder.field(FollowerInfo.STATUS_FIELD.getPreferredName(), info.getStatus().getName()); - if (info.getParameters() != null) { - builder.startObject(FollowerInfo.PARAMETERS_FIELD.getPreferredName()); - { - info.getParameters().toXContentFragment(builder, ToXContent.EMPTY_PARAMS); - } - builder.endObject(); - } - builder.endObject(); - } - builder.endArray(); - builder.endObject(); - } - - private static FollowInfoResponse createTestInstance() { - int numInfos = randomIntBetween(0, 64); - List infos = new ArrayList<>(numInfos); + @Override + protected FollowInfoAction.Response createServerTestInstance() { + int numInfos = randomIntBetween(0, 32); + List infos = new ArrayList<>(numInfos); for (int i = 0; i < numInfos; i++) 
{ - FollowInfoResponse.Status status = randomFrom(FollowInfoResponse.Status.values()); - FollowConfig followConfig = randomBoolean() ? FollowConfigTests.createTestInstance() : null; - infos.add(new FollowerInfo(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), status, followConfig)); + FollowParameters followParameters = null; + if (randomBoolean()) { + followParameters = randomFollowParameters(); + } + + infos.add(new FollowInfoAction.Response.FollowerInfo(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), + randomFrom(FollowInfoAction.Response.Status.values()), followParameters)); + } + return new FollowInfoAction.Response(infos); + } + + static FollowParameters randomFollowParameters() { + FollowParameters followParameters = new FollowParameters(); + followParameters.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong())); + followParameters.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + followParameters.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); + followParameters.setMaxRetryDelay(new TimeValue(randomNonNegativeLong())); + followParameters.setReadPollTimeout(new TimeValue(randomNonNegativeLong())); + return followParameters; + } + + @Override + protected FollowInfoResponse doParseToClientInstance(XContentParser parser) throws IOException { + return FollowInfoResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(FollowInfoAction.Response serverTestInstance, FollowInfoResponse clientInstance) { + assertThat(serverTestInstance.getFollowInfos().size(), equalTo(clientInstance.getInfos().size())); + for (int i = 0; i < serverTestInstance.getFollowInfos().size(); i++) { + FollowInfoAction.Response.FollowerInfo serverFollowInfo = serverTestInstance.getFollowInfos().get(i); + FollowInfoResponse.FollowerInfo clientFollowerInfo = clientInstance.getInfos().get(i); + + assertThat(serverFollowInfo.getRemoteCluster(), equalTo(clientFollowerInfo.getRemoteCluster())); + assertThat(serverFollowInfo.getLeaderIndex(), equalTo(clientFollowerInfo.getLeaderIndex())); + assertThat(serverFollowInfo.getFollowerIndex(), equalTo(clientFollowerInfo.getFollowerIndex())); + assertThat(serverFollowInfo.getStatus().toString().toLowerCase(Locale.ROOT), + equalTo(clientFollowerInfo.getStatus().getName().toLowerCase(Locale.ROOT))); + + FollowParameters serverParams = serverFollowInfo.getParameters(); + FollowConfig clientParams = clientFollowerInfo.getParameters(); + if (serverParams != null) { + assertThat(serverParams.getMaxReadRequestOperationCount(), equalTo(clientParams.getMaxReadRequestOperationCount())); + assertThat(serverParams.getMaxWriteRequestOperationCount(), equalTo(clientParams.getMaxWriteRequestOperationCount())); + assertThat(serverParams.getMaxOutstandingReadRequests(), equalTo(clientParams.getMaxOutstandingReadRequests())); + assertThat(serverParams.getMaxOutstandingWriteRequests(), equalTo(clientParams.getMaxOutstandingWriteRequests())); + assertThat(serverParams.getMaxReadRequestSize(), equalTo(clientParams.getMaxReadRequestSize())); 
+                assertThat(serverParams.getMaxWriteRequestSize(), equalTo(clientParams.getMaxWriteRequestSize()));
+                assertThat(serverParams.getMaxWriteBufferCount(), equalTo(clientParams.getMaxWriteBufferCount()));
+                assertThat(serverParams.getMaxWriteBufferSize(), equalTo(clientParams.getMaxWriteBufferSize()));
+                assertThat(serverParams.getMaxRetryDelay(), equalTo(clientParams.getMaxRetryDelay()));
+                assertThat(serverParams.getReadPollTimeout(), equalTo(clientParams.getReadPollTimeout()));
+            } else {
+                assertThat(clientParams, nullValue());
+            }
         }
-        return new FollowInfoResponse(infos);
     }
 }

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java
index 5ec3cb4edcf..cd7257342c7 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java
@@ -20,234 +20,115 @@
 package org.elasticsearch.client.ccr;

 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.client.AbstractResponseTestCase;
 import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus;
+import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction;

 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.TimeUnit;
+import java.util.TreeMap;

-import static org.elasticsearch.client.ccr.CcrStatsResponseTests.randomIndicesFollowStats;
-import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
+import static org.elasticsearch.client.ccr.CcrStatsResponseTests.createStatsResponse;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;

-public class FollowStatsResponseTests extends ESTestCase {
+public class FollowStatsResponseTests
+    extends AbstractResponseTestCase<FollowStatsAction.StatsResponses, FollowStatsResponse> {

-    public void testFromXContent() throws IOException {
-        xContentTester(this::createParser,
-            FollowStatsResponseTests::createTestInstance,
-            FollowStatsResponseTests::toXContent,
-            FollowStatsResponse::fromXContent)
-            .supportsUnknownFields(true)
-            .assertEqualsConsumer(FollowStatsResponseTests::assertEqualInstances)
-            .assertToXContentEquivalence(false)
-            .test();
+    @Override
+    protected FollowStatsAction.StatsResponses createServerTestInstance() {
+        return createStatsResponse();
     }

-    // Needed, because exceptions in IndicesFollowStats cannot be compared
-    private static void assertEqualInstances(FollowStatsResponse expectedInstance, FollowStatsResponse newInstance) {
-        assertNotSame(expectedInstance, newInstance);
-        {
-            IndicesFollowStats newIndicesFollowStats = newInstance.getIndicesFollowStats();
-            IndicesFollowStats expectedIndicesFollowStats = expectedInstance.getIndicesFollowStats();
-            assertThat(newIndicesFollowStats.getShardFollowStats().size(),
equalTo(expectedIndicesFollowStats.getShardFollowStats().size())); - assertThat(newIndicesFollowStats.getShardFollowStats().keySet(), - equalTo(expectedIndicesFollowStats.getShardFollowStats().keySet())); - for (Map.Entry> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) { - List newStats = indexEntry.getValue(); - List expectedStats = expectedIndicesFollowStats.getShardFollowStats(indexEntry.getKey()); - assertThat(newStats.size(), equalTo(expectedStats.size())); - for (int i = 0; i < newStats.size(); i++) { - ShardFollowStats actualShardFollowStats = newStats.get(i); - ShardFollowStats expectedShardFollowStats = expectedStats.get(i); + @Override + protected FollowStatsResponse doParseToClientInstance(XContentParser parser) throws IOException { + return FollowStatsResponse.fromXContent(parser); + } - assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster())); - assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.getLeaderIndex())); - assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.getFollowerIndex())); - assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId())); - assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(), - equalTo(expectedShardFollowStats.getLeaderGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.getLeaderMaxSeqNo())); - assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(), - equalTo(expectedShardFollowStats.getFollowerGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.getLastRequestedSeqNo())); - assertThat(actualShardFollowStats.getOutstandingReadRequests(), - equalTo(expectedShardFollowStats.getOutstandingReadRequests())); - assertThat(actualShardFollowStats.getOutstandingWriteRequests(), - equalTo(expectedShardFollowStats.getOutstandingWriteRequests())); - assertThat(actualShardFollowStats.getWriteBufferOperationCount(), - equalTo(expectedShardFollowStats.getWriteBufferOperationCount())); - assertThat(actualShardFollowStats.getFollowerMappingVersion(), - equalTo(expectedShardFollowStats.getFollowerMappingVersion())); - assertThat(actualShardFollowStats.getFollowerSettingsVersion(), - equalTo(expectedShardFollowStats.getFollowerSettingsVersion())); - assertThat(actualShardFollowStats.getTotalReadTimeMillis(), - equalTo(expectedShardFollowStats.getTotalReadTimeMillis())); - assertThat(actualShardFollowStats.getSuccessfulReadRequests(), - equalTo(expectedShardFollowStats.getSuccessfulReadRequests())); - assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.getFailedReadRequests())); - assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.getOperationsReads())); - assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.getBytesRead())); - assertThat(actualShardFollowStats.getTotalWriteTimeMillis(), - equalTo(expectedShardFollowStats.getTotalWriteTimeMillis())); - assertThat(actualShardFollowStats.getSuccessfulWriteRequests(), - equalTo(expectedShardFollowStats.getSuccessfulWriteRequests())); - assertThat(actualShardFollowStats.getFailedWriteRequests(), - equalTo(expectedShardFollowStats.getFailedWriteRequests())); - assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.getOperationWritten())); - 
assertThat(actualShardFollowStats.getReadExceptions().size(), - equalTo(expectedShardFollowStats.getReadExceptions().size())); - assertThat(actualShardFollowStats.getReadExceptions().keySet(), - equalTo(expectedShardFollowStats.getReadExceptions().keySet())); - for (final Map.Entry> entry : - actualShardFollowStats.getReadExceptions().entrySet()) { - final Tuple expectedTuple = - expectedShardFollowStats.getReadExceptions().get(entry.getKey()); - assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1())); - // x-content loses the exception - final ElasticsearchException expected = expectedTuple.v2(); - assertThat(entry.getValue().v2().getMessage(), containsString(expected.getMessage())); - assertNotNull(entry.getValue().v2().getCause()); - assertThat( - entry.getValue().v2().getCause(), - anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class))); - assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage())); - } - assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(), - equalTo(expectedShardFollowStats.getTimeSinceLastReadMillis())); + @Override + protected void assertInstances(FollowStatsAction.StatsResponses serverTestInstance, FollowStatsResponse clientInstance) { + IndicesFollowStats newIndicesFollowStats = clientInstance.getIndicesFollowStats(); + + // sort by index name, then shard ID + final Map> expectedIndicesFollowStats = new TreeMap<>(); + for (final FollowStatsAction.StatsResponse statsResponse : serverTestInstance.getStatsResponses()) { + expectedIndicesFollowStats.computeIfAbsent( + statsResponse.status().followerIndex(), + k -> new TreeMap<>()).put(statsResponse.status().getShardId(), statsResponse); + } + assertThat(newIndicesFollowStats.getShardFollowStats().size(), + equalTo(expectedIndicesFollowStats.size())); + assertThat(newIndicesFollowStats.getShardFollowStats().keySet(), + equalTo(expectedIndicesFollowStats.keySet())); + for (Map.Entry> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) { + List newStats = indexEntry.getValue(); + Map expectedStats = expectedIndicesFollowStats.get(indexEntry.getKey()); + assertThat(newStats.size(), equalTo(expectedStats.size())); + for (int i = 0; i < newStats.size(); i++) { + ShardFollowStats actualShardFollowStats = newStats.get(i); + ShardFollowNodeTaskStatus expectedShardFollowStats = expectedStats.get(actualShardFollowStats.getShardId()).status(); + + assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster())); + assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.leaderIndex())); + assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.followerIndex())); + assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId())); + assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(), + equalTo(expectedShardFollowStats.leaderGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.leaderMaxSeqNo())); + assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(), + equalTo(expectedShardFollowStats.followerGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.lastRequestedSeqNo())); + assertThat(actualShardFollowStats.getOutstandingReadRequests(), + equalTo(expectedShardFollowStats.outstandingReadRequests())); + 
assertThat(actualShardFollowStats.getOutstandingWriteRequests(), + equalTo(expectedShardFollowStats.outstandingWriteRequests())); + assertThat(actualShardFollowStats.getWriteBufferOperationCount(), + equalTo(expectedShardFollowStats.writeBufferOperationCount())); + assertThat(actualShardFollowStats.getFollowerMappingVersion(), + equalTo(expectedShardFollowStats.followerMappingVersion())); + assertThat(actualShardFollowStats.getFollowerSettingsVersion(), + equalTo(expectedShardFollowStats.followerSettingsVersion())); + assertThat(actualShardFollowStats.getTotalReadTimeMillis(), + equalTo(expectedShardFollowStats.totalReadTimeMillis())); + assertThat(actualShardFollowStats.getSuccessfulReadRequests(), + equalTo(expectedShardFollowStats.successfulReadRequests())); + assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.failedReadRequests())); + assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.operationsReads())); + assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.bytesRead())); + assertThat(actualShardFollowStats.getTotalWriteTimeMillis(), + equalTo(expectedShardFollowStats.totalWriteTimeMillis())); + assertThat(actualShardFollowStats.getSuccessfulWriteRequests(), + equalTo(expectedShardFollowStats.successfulWriteRequests())); + assertThat(actualShardFollowStats.getFailedWriteRequests(), + equalTo(expectedShardFollowStats.failedWriteRequests())); + assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.operationWritten())); + assertThat(actualShardFollowStats.getReadExceptions().size(), + equalTo(expectedShardFollowStats.readExceptions().size())); + assertThat(actualShardFollowStats.getReadExceptions().keySet(), + equalTo(expectedShardFollowStats.readExceptions().keySet())); + for (final Map.Entry> entry : + actualShardFollowStats.getReadExceptions().entrySet()) { + final Tuple expectedTuple = + expectedShardFollowStats.readExceptions().get(entry.getKey()); + assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1())); + // x-content loses the exception + final ElasticsearchException expected = expectedTuple.v2(); + assertThat(entry.getValue().v2().getMessage(), containsString(expected.getMessage())); + assertNotNull(entry.getValue().v2().getCause()); + assertThat( + entry.getValue().v2().getCause(), + anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class))); + assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage())); } + assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(), + equalTo(expectedShardFollowStats.timeSinceLastReadMillis())); } } } - private static void toXContent(FollowStatsResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - { - builder.startArray(IndicesFollowStats.INDICES_FIELD.getPreferredName()); - for (Map.Entry> indexEntry : - response.getIndicesFollowStats().getShardFollowStats().entrySet()) { - builder.startObject(); - { - builder.field(IndicesFollowStats.INDEX_FIELD.getPreferredName(), indexEntry.getKey()); - builder.startArray(IndicesFollowStats.SHARDS_FIELD.getPreferredName()); - { - for (ShardFollowStats stats : indexEntry.getValue()) { - builder.startObject(); - { - builder.field(ShardFollowStats.LEADER_CLUSTER.getPreferredName(), stats.getRemoteCluster()); - builder.field(ShardFollowStats.LEADER_INDEX.getPreferredName(), stats.getLeaderIndex()); - 
builder.field(ShardFollowStats.FOLLOWER_INDEX.getPreferredName(), stats.getFollowerIndex()); - builder.field(ShardFollowStats.SHARD_ID.getPreferredName(), stats.getShardId()); - builder.field(ShardFollowStats.LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), - stats.getLeaderGlobalCheckpoint()); - builder.field(ShardFollowStats.LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), stats.getLeaderMaxSeqNo()); - builder.field(ShardFollowStats.FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), - stats.getFollowerGlobalCheckpoint()); - builder.field(ShardFollowStats.FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), - stats.getFollowerMaxSeqNo()); - builder.field(ShardFollowStats.LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), - stats.getLastRequestedSeqNo()); - builder.field(ShardFollowStats.OUTSTANDING_READ_REQUESTS.getPreferredName(), - stats.getOutstandingReadRequests()); - builder.field(ShardFollowStats.OUTSTANDING_WRITE_REQUESTS.getPreferredName(), - stats.getOutstandingWriteRequests()); - builder.field(ShardFollowStats.WRITE_BUFFER_OPERATION_COUNT_FIELD.getPreferredName(), - stats.getWriteBufferOperationCount()); - builder.humanReadableField( - ShardFollowStats.WRITE_BUFFER_SIZE_IN_BYTES_FIELD.getPreferredName(), - "write_buffer_size", - new ByteSizeValue(stats.getWriteBufferSizeInBytes())); - builder.field(ShardFollowStats.FOLLOWER_MAPPING_VERSION_FIELD.getPreferredName(), - stats.getFollowerMappingVersion()); - builder.field(ShardFollowStats.FOLLOWER_SETTINGS_VERSION_FIELD.getPreferredName(), - stats.getFollowerSettingsVersion()); - builder.humanReadableField( - ShardFollowStats.TOTAL_READ_TIME_MILLIS_FIELD.getPreferredName(), - "total_read_time", - new TimeValue(stats.getTotalReadTimeMillis(), TimeUnit.MILLISECONDS)); - builder.humanReadableField( - ShardFollowStats.TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD.getPreferredName(), - "total_read_remote_exec_time", - new TimeValue(stats.getTotalReadRemoteExecTimeMillis(), TimeUnit.MILLISECONDS)); - builder.field(ShardFollowStats.SUCCESSFUL_READ_REQUESTS_FIELD.getPreferredName(), - stats.getSuccessfulReadRequests()); - builder.field(ShardFollowStats.FAILED_READ_REQUESTS_FIELD.getPreferredName(), - stats.getFailedReadRequests()); - builder.field(ShardFollowStats.OPERATIONS_READ_FIELD.getPreferredName(), stats.getOperationsReads()); - builder.humanReadableField( - ShardFollowStats.BYTES_READ.getPreferredName(), - "total_read", - new ByteSizeValue(stats.getBytesRead(), ByteSizeUnit.BYTES)); - builder.humanReadableField( - ShardFollowStats.TOTAL_WRITE_TIME_MILLIS_FIELD.getPreferredName(), - "total_write_time", - new TimeValue(stats.getTotalWriteTimeMillis(), TimeUnit.MILLISECONDS)); - builder.field(ShardFollowStats.SUCCESSFUL_WRITE_REQUESTS_FIELD.getPreferredName(), - stats.getSuccessfulWriteRequests()); - builder.field(ShardFollowStats.FAILED_WRITE_REQUEST_FIELD.getPreferredName(), - stats.getFailedWriteRequests()); - builder.field(ShardFollowStats.OPERATIONS_WRITTEN.getPreferredName(), stats.getOperationWritten()); - builder.startArray(ShardFollowStats.READ_EXCEPTIONS.getPreferredName()); - { - for (final Map.Entry> entry : - stats.getReadExceptions().entrySet()) { - builder.startObject(); - { - builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), - entry.getKey()); - builder.field(ShardFollowStats.READ_EXCEPTIONS_RETRIES.getPreferredName(), - entry.getValue().v1()); - builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); - builder.startObject(); - { - 
ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, - entry.getValue().v2()); - } - builder.endObject(); - } - builder.endObject(); - } - } - builder.endArray(); - builder.humanReadableField( - ShardFollowStats.TIME_SINCE_LAST_READ_MILLIS_FIELD.getPreferredName(), - "time_since_last_read", - new TimeValue(stats.getTimeSinceLastReadMillis(), TimeUnit.MILLISECONDS)); - if (stats.getFatalException() != null) { - builder.field(ShardFollowStats.FATAL_EXCEPTION.getPreferredName()); - builder.startObject(); - { - ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, - stats.getFatalException()); - } - builder.endObject(); - } - } - builder.endObject(); - } - } - builder.endArray(); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); - } - - private static FollowStatsResponse createTestInstance() { - return new FollowStatsResponse(randomIndicesFollowStats()); - } - } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java index f6f0f1747e2..65ef3aa062d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java @@ -19,99 +19,111 @@ package org.elasticsearch.client.ccr; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; +import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; -import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD; -import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD; -import static org.elasticsearch.client.ccr.PutFollowRequest.REMOTE_CLUSTER_FIELD; -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; -public class GetAutoFollowPatternResponseTests extends ESTestCase { +public class GetAutoFollowPatternResponseTests extends AbstractResponseTestCase< + GetAutoFollowPatternAction.Response, + GetAutoFollowPatternResponse> { - public void testFromXContent() throws IOException { - xContentTester(this::createParser, - this::createTestInstance, - GetAutoFollowPatternResponseTests::toXContent, - GetAutoFollowPatternResponse::fromXContent) - .supportsUnknownFields(true) - .test(); - } - - private GetAutoFollowPatternResponse createTestInstance() { + @Override + protected GetAutoFollowPatternAction.Response createServerTestInstance() { int numPatterns = randomIntBetween(0, 16); - NavigableMap patterns = new TreeMap<>(); + NavigableMap patterns = new TreeMap<>(); for (int i = 0; i < numPatterns; i++) { - GetAutoFollowPatternResponse.Pattern pattern = new GetAutoFollowPatternResponse.Pattern( - randomAlphaOfLength(4), 
Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4)); + String remoteCluster = randomAlphaOfLength(4); + List leaderIndexPatters = Collections.singletonList(randomAlphaOfLength(4)); + String followIndexNamePattern = randomAlphaOfLength(4); + + Integer maxOutstandingReadRequests = null; if (randomBoolean()) { - pattern.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); + maxOutstandingReadRequests = randomIntBetween(0, Integer.MAX_VALUE); } + Integer maxOutstandingWriteRequests = null; if (randomBoolean()) { - pattern.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE)); + maxOutstandingWriteRequests = randomIntBetween(0, Integer.MAX_VALUE); } + Integer maxReadRequestOperationCount = null; if (randomBoolean()) { - pattern.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + maxReadRequestOperationCount = randomIntBetween(0, Integer.MAX_VALUE); } + ByteSizeValue maxReadRequestSize = null; if (randomBoolean()) { - pattern.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong())); + maxReadRequestSize = new ByteSizeValue(randomNonNegativeLong()); } + Integer maxWriteBufferCount = null; if (randomBoolean()) { - pattern.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); + maxWriteBufferCount = randomIntBetween(0, Integer.MAX_VALUE); } + ByteSizeValue maxWriteBufferSize = null; if (randomBoolean()) { - pattern.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); + maxWriteBufferSize = new ByteSizeValue(randomNonNegativeLong()); } + Integer maxWriteRequestOperationCount = null; if (randomBoolean()) { - pattern.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + maxWriteRequestOperationCount = randomIntBetween(0, Integer.MAX_VALUE); } + ByteSizeValue maxWriteRequestSize = null; if (randomBoolean()) { - pattern.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + maxWriteRequestSize = new ByteSizeValue(randomNonNegativeLong()); } + TimeValue maxRetryDelay = null; if (randomBoolean()) { - pattern.setMaxRetryDelay(new TimeValue(randomNonNegativeLong())); + maxRetryDelay = new TimeValue(randomNonNegativeLong()); } + TimeValue readPollTimeout = null; if (randomBoolean()) { - pattern.setReadPollTimeout(new TimeValue(randomNonNegativeLong())); + readPollTimeout = new TimeValue(randomNonNegativeLong()); } - patterns.put(randomAlphaOfLength(4), pattern); + patterns.put(randomAlphaOfLength(4), new AutoFollowMetadata.AutoFollowPattern(remoteCluster, leaderIndexPatters, + followIndexNamePattern, maxReadRequestOperationCount, maxWriteRequestOperationCount, maxOutstandingReadRequests, + maxOutstandingWriteRequests, maxReadRequestSize, maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize, + maxRetryDelay, readPollTimeout)); } - return new GetAutoFollowPatternResponse(patterns); + return new GetAutoFollowPatternAction.Response(patterns); } - public static void toXContent(GetAutoFollowPatternResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - { - builder.startArray(GetAutoFollowPatternResponse.PATTERNS_FIELD.getPreferredName()); - for (Map.Entry entry : response.getPatterns().entrySet()) { - builder.startObject(); - { - builder.field(GetAutoFollowPatternResponse.NAME_FIELD.getPreferredName(), entry.getKey()); - builder.startObject(GetAutoFollowPatternResponse.PATTERN_FIELD.getPreferredName()); - { - GetAutoFollowPatternResponse.Pattern pattern = entry.getValue(); - 
builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), pattern.getRemoteCluster()); - builder.field(LEADER_PATTERNS_FIELD.getPreferredName(), pattern.getLeaderIndexPatterns()); - if (pattern.getFollowIndexNamePattern()!= null) { - builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), pattern.getFollowIndexNamePattern()); - } - entry.getValue().toXContentFragment(builder, ToXContent.EMPTY_PARAMS); - } - builder.endObject(); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); + @Override + protected GetAutoFollowPatternResponse doParseToClientInstance(XContentParser parser) throws IOException { + return GetAutoFollowPatternResponse.fromXContent(parser); } + + @Override + protected void assertInstances(GetAutoFollowPatternAction.Response serverTestInstance, GetAutoFollowPatternResponse clientInstance) { + assertThat(serverTestInstance.getAutoFollowPatterns().size(), equalTo(clientInstance.getPatterns().size())); + for (Map.Entry entry : serverTestInstance.getAutoFollowPatterns().entrySet()) { + AutoFollowMetadata.AutoFollowPattern serverPattern = entry.getValue(); + GetAutoFollowPatternResponse.Pattern clientPattern = clientInstance.getPatterns().get(entry.getKey()); + assertThat(clientPattern, notNullValue()); + + assertThat(serverPattern.getRemoteCluster(), equalTo(clientPattern.getRemoteCluster())); + assertThat(serverPattern.getLeaderIndexPatterns(), equalTo(clientPattern.getLeaderIndexPatterns())); + assertThat(serverPattern.getFollowIndexPattern(), equalTo(clientPattern.getFollowIndexNamePattern())); + assertThat(serverPattern.getMaxOutstandingReadRequests(), equalTo(clientPattern.getMaxOutstandingReadRequests())); + assertThat(serverPattern.getMaxOutstandingWriteRequests(), equalTo(clientPattern.getMaxOutstandingWriteRequests())); + assertThat(serverPattern.getMaxReadRequestOperationCount(), equalTo(clientPattern.getMaxReadRequestOperationCount())); + assertThat(serverPattern.getMaxWriteRequestOperationCount(), equalTo(clientPattern.getMaxWriteRequestOperationCount())); + assertThat(serverPattern.getMaxReadRequestSize(), equalTo(clientPattern.getMaxReadRequestSize())); + assertThat(serverPattern.getMaxWriteRequestSize(), equalTo(clientPattern.getMaxWriteRequestSize())); + assertThat(serverPattern.getMaxWriteBufferCount(), equalTo(clientPattern.getMaxWriteBufferCount())); + assertThat(serverPattern.getMaxWriteBufferSize(), equalTo(clientPattern.getMaxWriteBufferSize())); + assertThat(serverPattern.getMaxRetryDelay(), equalTo(clientPattern.getMaxRetryDelay())); + assertThat(serverPattern.getReadPollTimeout(), equalTo(clientPattern.getReadPollTimeout())); + } + } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java index 00bcf535f08..52fe70b3a39 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java @@ -19,35 +19,30 @@ package org.elasticsearch.client.ccr; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import java.io.IOException; -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; +import static 
org.hamcrest.Matchers.is;

-public class PutFollowResponseTests extends ESTestCase {
+public class PutFollowResponseTests extends AbstractResponseTestCase<PutFollowAction.Response, PutFollowResponse> {

-    public void testFromXContent() throws IOException {
-        xContentTester(this::createParser,
-            this::createTestInstance,
-            PutFollowResponseTests::toXContent,
-            PutFollowResponse::fromXContent)
-            .supportsUnknownFields(true)
-            .test();
+    @Override
+    protected PutFollowAction.Response createServerTestInstance() {
+        return new PutFollowAction.Response(randomBoolean(), randomBoolean(), randomBoolean());
     }

-    private PutFollowResponse createTestInstance() {
-        return new PutFollowResponse(randomBoolean(), randomBoolean(), randomBoolean());
+    @Override
+    protected PutFollowResponse doParseToClientInstance(XContentParser parser) throws IOException {
+        return PutFollowResponse.fromXContent(parser);
     }

-    public static void toXContent(PutFollowResponse response, XContentBuilder builder) throws IOException {
-        builder.startObject();
-        {
-            builder.field(PutFollowResponse.FOLLOW_INDEX_CREATED.getPreferredName(), response.isFollowIndexCreated());
-            builder.field(PutFollowResponse.FOLLOW_INDEX_SHARDS_ACKED.getPreferredName(), response.isFollowIndexShardsAcked());
-            builder.field(PutFollowResponse.INDEX_FOLLOWING_STARTED.getPreferredName(), response.isIndexFollowingStarted());
-        }
-        builder.endObject();
+    @Override
+    protected void assertInstances(PutFollowAction.Response serverTestInstance, PutFollowResponse clientInstance) {
+        assertThat(serverTestInstance.isFollowIndexCreated(), is(clientInstance.isFollowIndexCreated()));
+        assertThat(serverTestInstance.isFollowIndexShardsAcked(), is(clientInstance.isFollowIndexShardsAcked()));
+        assertThat(serverTestInstance.isIndexFollowingStarted(), is(clientInstance.isIndexFollowingStarted()));
     }
 }

From 3b9ab5da04e3131267cc14c1a80b0a7915c2f633 Mon Sep 17 00:00:00 2001
From: Alexander Reelsen
Date: Tue, 9 Apr 2019 16:35:25 +0200
Subject: [PATCH 05/60] Fix order of request body search parameter names in
 documentation (#40777)

The order was random, which made it super hard to find anything.
This changes the order to be alphabetical.

---
 docs/reference/search/request-body.asciidoc   | 60 ++++++++++---------
 ...on-and-seq-no.asciidoc => seq-no.asciidoc} | 17 ------
 .../search/request/stored-fields.asciidoc     |  2 +-
 .../reference/search/request/version.asciidoc | 16 +++++
 4 files changed, 48 insertions(+), 47 deletions(-)
 rename docs/reference/search/request/{version-and-seq-no.asciidoc => seq-no.asciidoc} (60%)
 create mode 100644 docs/reference/search/request/version.asciidoc

diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc
index a721b3938fc..aa67ffec591 100644
--- a/docs/reference/search/request-body.asciidoc
+++ b/docs/reference/search/request-body.asciidoc
@@ -188,46 +188,48 @@
 to the client. This means it includes the time spent waiting in thread pools,
 executing a distributed search across the whole cluster and gathering all the
 results.
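As a point of reference, here is a minimal client-side sketch (assuming the high-level REST client from this repository; the client instance and index name are illustrative) showing that the `took` described above is the server-side figure, while wall-clock time at the caller also includes network transfer:

    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;

    public class TookVsWallClock {
        static void measure(RestHighLevelClient client) throws Exception {
            long start = System.currentTimeMillis();
            SearchResponse response = client.search(new SearchRequest("my-index"), RequestOptions.DEFAULT);
            long wallClockMillis = System.currentTimeMillis() - start;
            // getTook() reports the server-side "took"; the wall-clock figure additionally
            // includes network transfer and client-side (de)serialization.
            System.out.println("took=" + response.getTook() + " wall-clock=" + wallClockMillis + "ms");
        }
    }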
-include::request/query.asciidoc[] - -include::request/from-size.asciidoc[] - -include::request/sort.asciidoc[] - -include::request/track-total-hits.asciidoc[] - -include::request/source-filtering.asciidoc[] - -include::request/stored-fields.asciidoc[] - -include::request/script-fields.asciidoc[] - include::request/docvalue-fields.asciidoc[] -include::request/post-filter.asciidoc[] - -include::request/highlighting.asciidoc[] - -include::request/rescore.asciidoc[] - -include::request/search-type.asciidoc[] - -include::request/scroll.asciidoc[] - -include::request/preference.asciidoc[] - include::request/explain.asciidoc[] -include::request/version-and-seq-no.asciidoc[] +include::request/collapse.asciidoc[] + +include::request/from-size.asciidoc[] + +include::request/highlighting.asciidoc[] include::request/index-boost.asciidoc[] +include::request/inner-hits.asciidoc[] + include::request/min-score.asciidoc[] include::request/named-queries-and-filters.asciidoc[] -include::request/inner-hits.asciidoc[] +include::request/post-filter.asciidoc[] -include::request/collapse.asciidoc[] +include::request/preference.asciidoc[] + +include::request/query.asciidoc[] + +include::request/rescore.asciidoc[] + +include::request/script-fields.asciidoc[] + +include::request/scroll.asciidoc[] include::request/search-after.asciidoc[] + +include::request/search-type.asciidoc[] + +include::request/seq-no.asciidoc[] + +include::request/sort.asciidoc[] + +include::request/source-filtering.asciidoc[] + +include::request/stored-fields.asciidoc[] + +include::request/track-total-hits.asciidoc[] + +include::request/version.asciidoc[] diff --git a/docs/reference/search/request/version-and-seq-no.asciidoc b/docs/reference/search/request/seq-no.asciidoc similarity index 60% rename from docs/reference/search/request/version-and-seq-no.asciidoc rename to docs/reference/search/request/seq-no.asciidoc index 2bca4c985b2..0ab7bec4487 100644 --- a/docs/reference/search/request/version-and-seq-no.asciidoc +++ b/docs/reference/search/request/seq-no.asciidoc @@ -15,20 +15,3 @@ GET /_search } -------------------------------------------------- // CONSOLE - -[[search-request-version]] -=== Version - -Returns a version for each search hit. - -[source,js] --------------------------------------------------- -GET /_search -{ - "version": true, - "query" : { - "term" : { "user" : "kimchy" } - } -} --------------------------------------------------- -// CONSOLE diff --git a/docs/reference/search/request/stored-fields.asciidoc b/docs/reference/search/request/stored-fields.asciidoc index 2feb9313d8a..195dc39f11e 100644 --- a/docs/reference/search/request/stored-fields.asciidoc +++ b/docs/reference/search/request/stored-fields.asciidoc @@ -1,5 +1,5 @@ [[search-request-stored-fields]] -=== Fields +=== Stored Fields WARNING: The `stored_fields` parameter is about fields that are explicitly marked as stored in the mapping, which is off by default and generally not recommended. diff --git a/docs/reference/search/request/version.asciidoc b/docs/reference/search/request/version.asciidoc new file mode 100644 index 00000000000..57c6ce27feb --- /dev/null +++ b/docs/reference/search/request/version.asciidoc @@ -0,0 +1,16 @@ +[[search-request-version]] +=== Version + +Returns a version for each search hit. 
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+    "version": true,
+    "query" : {
+        "term" : { "user" : "kimchy" }
+    }
+}
+--------------------------------------------------
+// CONSOLE

From 2ac514b909a28963ac2681a57976343cd3180a8f Mon Sep 17 00:00:00 2001
From: Costin Leau
Date: Tue, 9 Apr 2019 18:38:54 +0300
Subject: [PATCH 06/60] SQL: Fix catalog filtering in SYS COLUMNS (#40583)

Properly treat '%' as a wildcard for catalog filtering instead of doing
a straight string match.
Table filtering now considers aliases as well.
Add an escape char for LIKE queries with user-defined params.
Fix the monotonicity of ORDINAL_POSITION.
Add an integration test for SYS COLUMNS - currently running only inside
single_node since the cluster name is test-dependent.
Add pattern unescaping for index names.

Fix #40582

(cherry picked from commit 8e61b77d3f849661b7175544f471119042fe9551)
---
 .../xpack/sql/jdbc/JdbcDatabaseMetaData.java  |  23 +--
 .../xpack/sql/qa/security/JdbcSecurityIT.java |  10 +-
 .../sql/qa/single_node/JdbcCsvSpecIT.java     |  16 ++
 .../xpack/sql/qa/jdbc/CsvTestUtils.java       |   2 +
 .../xpack/sql/qa/jdbc/JdbcTestUtils.java      |   3 +-
 .../qa/jdbc/SpecBaseIntegrationTestCase.java  |   6 +-
 .../setup_mock_metadata_get_columns.sql       |   2 +-
 .../single-node-only/command-sys.csv-spec     | 120 +++++++++++++
 .../sql/analysis/index/IndexResolver.java     |  17 +-
 .../plan/logical/command/sys/SysColumns.java  |  56 ++++--
 .../xpack/sql/util/StringUtils.java           |  28 +++
 .../logical/command/sys/SysColumnsTests.java  | 169 ++++++++++++++----
 .../logical/command/sys/SysParserTests.java   | 163 -----------------
 .../xpack/sql/util/LikeConversionTests.java   |  28 ++-
 14 files changed, 409 insertions(+), 234 deletions(-)
 create mode 100644 x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec
 delete mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java

diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java
index eaececff16d..4d5b6eae2e1 100644
--- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java
+++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java
@@ -33,6 +33,8 @@ import static org.elasticsearch.xpack.sql.client.StringUtils.EMPTY;
  */
 class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {

+    private static final String WILDCARD = "%";
+
     private final JdbcConnection con;

     JdbcDatabaseMetaData(JdbcConnection con) {
@@ -713,19 +715,19 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
         // null means catalog info is irrelevant
         // % means return all catalogs
         // EMPTY means return those without a catalog
-        return catalog == null || catalog.equals(EMPTY) || catalog.equals("%") || catalog.equals(defaultCatalog());
+        return catalog == null || catalog.equals(EMPTY) || catalog.equals(WILDCARD) || catalog.equals(defaultCatalog());
     }

     private boolean isDefaultSchema(String schema) {
         // null means schema info is irrelevant
         // % means return all schemas
         // EMPTY means return those without a schema
-        return schema == null || schema.equals(EMPTY) || schema.equals("%");
+        return schema == null || schema.equals(EMPTY) || schema.equals(WILDCARD);
     }
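    // Illustrative caller-side sketch (not part of this patch): '%' and '_' are LIKE
    // wildcards, so a literal underscore in an index name has to be escaped with the
    // driver's escape character, which is why the metadata statements below now
    // declare ESCAPE '\':
    //
    //   DatabaseMetaData md = connection.getMetaData();   // connection is assumed
    //   String esc = md.getSearchStringEscape();          // typically "\"
    //   ResultSet columns = md.getColumns(null, "%", "test" + esc + "_emp", "%");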
     @Override
     public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
-        String statement = "SYS TABLES CATALOG LIKE ? LIKE ?";
+        String statement = "SYS TABLES CATALOG LIKE ? ESCAPE '\\' LIKE ? ESCAPE '\\' ";

         if (types != null && types.length > 0) {
             statement += " TYPE ?";
@@ -738,8 +740,8 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
         }

         PreparedStatement ps = con.prepareStatement(statement);
-        ps.setString(1, catalog != null ? catalog.trim() : "%");
-        ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : "%");
+        ps.setString(1, catalog != null ? catalog.trim() : WILDCARD);
+        ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : WILDCARD);

         if (types != null && types.length > 0) {
             for (int i = 0; i < types.length; i++) {
@@ -784,14 +786,15 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
         return memorySet(con.cfg, columnInfo("TABLE_TYPES", "TABLE_TYPE"), data);
     }

+    @Override
     public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
             throws SQLException {
-        PreparedStatement ps = con.prepareStatement("SYS COLUMNS CATALOG ? TABLE LIKE ? LIKE ?");
-        // TODO: until passing null works, pass an empty string
-        ps.setString(1, catalog != null ? catalog.trim() : EMPTY);
-        ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : "%");
-        ps.setString(3, columnNamePattern != null ? columnNamePattern.trim() : "%");
+        PreparedStatement ps = con.prepareStatement("SYS COLUMNS CATALOG ? TABLE LIKE ? ESCAPE '\\' LIKE ? ESCAPE '\\'");
+        // NB: catalog is not a pattern, hence why null is sent instead
+        ps.setString(1, catalog != null ? catalog.trim() : null);
+        ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : WILDCARD);
+        ps.setString(3, columnNamePattern != null ? columnNamePattern.trim() : WILDCARD);
         return ps.executeQuery();
     }

diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java
index c56f3b23946..a911e7d4854 100644
--- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java
+++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java
@@ -232,13 +232,13 @@ public class JdbcSecurityIT extends SqlSecurityTestCase {
     public void checkNoMonitorMain(String user) throws Exception {
         // Without monitor/main the JDBC driver - ES server version comparison doesn't take place, which fails everything else
         expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)));
-        expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).getMetaData().getDatabaseMajorVersion()); 
+        expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).getMetaData().getDatabaseMajorVersion());
         expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).getMetaData().getDatabaseMinorVersion());

-        expectUnauthorized("cluster:monitor/main", user, 
+        expectUnauthorized("cluster:monitor/main", user,
             () -> es(userProperties(user)).createStatement().executeQuery("SELECT * FROM test"));
-        expectUnauthorized("cluster:monitor/main", user, 
+        expectUnauthorized("cluster:monitor/main", user,
             () -> es(userProperties(user)).createStatement().executeQuery("SHOW TABLES LIKE 'test'"));
-        expectUnauthorized("cluster:monitor/main", user, 
+        expectUnauthorized("cluster:monitor/main", user,
            () -> es(userProperties(user)).createStatement().executeQuery("DESCRIBE test"));
     }

@@ -292,7 +292,7 @@ public class JdbcSecurityIT extends SqlSecurityTestCase {
         expectActionMatchesAdmin(
             con -> con.getMetaData().getColumns(null, "%", "%t", "%"),
             "full_access",
-            con -> con.getMetaData().getColumns(null, "%", "%", "%"));
+            con -> con.getMetaData().getColumns(null, "%", "%t", "%"));
     }

     public void testMetaDataGetColumnsWithNoAccess() throws Exception {

diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java
index 66ac2e2c7df..f742b1304a7 100644
--- a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java
+++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java
@@ -5,10 +5,26 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.elasticsearch.xpack.sql.qa.jdbc.CsvSpecTestCase;
 import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase;

+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser;
+
 public class JdbcCsvSpecIT extends CsvSpecTestCase {

+    @ParametersFactory(argumentFormatting = PARAM_FORMATTING)
+    public static List<Object[]> readScriptSpec() throws Exception {
+        List<Object[]> list = new ArrayList<>();
+        list.addAll(CsvSpecTestCase.readScriptSpec());
+        list.addAll(readScriptSpec("/single-node-only/command-sys.csv-spec", specParser()));
+        return list;
+    }
+
     public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) {
         super(fileName, groupName, testName, lineNumber, testCase);
     }

diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java
index 8cc8cf6e040..6376bd13308 100644
--- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java
+++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java
@@ -155,6 +155,8 @@ public final class CsvTestUtils {
                 return "timestamp";
             case "bt":
                 return "byte";
+            case "sh":
+                return "short";
             default:
                 return type;
         }

diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java
index 19c30b55e92..123f22073ae 100644
--- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java
+++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java
@@ -212,7 +212,8 @@ final class JdbcTestUtils {
     @SuppressForbidden(reason = "need to open jar")
     private static JarInputStream getJarStream(URL resource) throws IOException {
         URLConnection con = resource.openConnection();
-        con.setDefaultUseCaches(false);
+        // do not cache files (to avoid keeping file handles around)
+        con.setUseCaches(false);
         return new JarInputStream(con.getInputStream());
     }

diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java
index 4282b97d87f..05ba49bbd0d 100644
---
a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.net.URL; +import java.net.URLConnection; import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.ResultSet; @@ -216,6 +217,9 @@ public abstract class SpecBaseIntegrationTestCase extends JdbcIntegrationTestCas @SuppressForbidden(reason = "test reads from jar") public static InputStream readFromJarUrl(URL source) throws IOException { - return source.openStream(); + URLConnection con = source.openConnection(); + // do not to cache files (to avoid keeping file handles around) + con.setUseCaches(false); + return con.getInputStream(); } } diff --git a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql index d6df2fbb9e1..5e02df28b06 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql +++ b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql @@ -30,7 +30,7 @@ FROM DUAL UNION ALL SELECT null, 'test1', 'name.keyword', 12, 'KEYWORD', 32766, 2147483647, null, null, 1, -- columnNullable - null, null, 12, 0, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO' + null, null, 12, 0, 2147483647, 2, 'YES', null, null, null, null, 'NO', 'NO' FROM DUAL UNION ALL SELECT null, 'test2', 'date', 93, 'DATETIME', 29, 8, null, null, diff --git a/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec new file mode 100644 index 00000000000..f6b02ba4bea --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec @@ -0,0 +1,120 @@ +// +// Sys Commands +// + +sysColumnsWithTableLikeWithEscape +SYS COLUMNS TABLE LIKE 'test\_emp' ESCAPE '\'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |32766 
|2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |11 |YES |null |null |null |null |NO |NO +; + +sysColumnsWithTableLikeNoEscape +SYS COLUMNS TABLE LIKE 'test_emp'; + +// since there's no escaping test_emp means test*emp which matches also test_alias_emp +// however as there's no way to filter the matching indices, we can't exclude the field + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 
|YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; + +sysColumnsWithCatalogAndLike +SYS COLUMNS CATALOG 'x-pack_plugin_sql_qa_single-node_integTestCluster' TABLE LIKE 'test\_emp\_copy' ESCAPE '\'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+-------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|gender |12 |KEYWORD |32766 
|2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; + +sysColumnsOnAliasWithTableLike +SYS COLUMNS TABLE LIKE 'test\_alias' ESCAPE '\'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |hire_date |93 
|DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; + +sysColumnsAllTables +SYS COLUMNS TABLE LIKE '%'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |@timestamp |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |bytes_in |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |2 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |bytes_out |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |client_ip |12 |IP |0 |39 |null |null |1 |null |null |12 |0 |null |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |client_port |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |dest_ip |12 |IP |0 |39 |null |null |1 |null |null |12 |0 |null |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |id |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |status |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 
|null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO 
+x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index ec2dfa46f47..367c9ea3a14 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -415,6 +415,8 @@ public class IndexResolver { GetIndexRequest getIndexRequest = createGetIndexRequest(indexWildcard); client.admin().indices().getIndex(getIndexRequest, ActionListener.wrap(getIndexResponse -> { ImmutableOpenMap> mappings = getIndexResponse.getMappings(); + ImmutableOpenMap> aliases = getIndexResponse.getAliases(); + List results = new ArrayList<>(mappings.size()); Pattern pattern = javaRegex != null ? Pattern.compile(javaRegex) : null; for (ObjectObjectCursor> indexMappings : mappings) { @@ -425,7 +427,20 @@ public class IndexResolver { * and not the concrete index: there is a well known information leak of the concrete index name in the response. */ String concreteIndex = indexMappings.key; - if (pattern == null || pattern.matcher(concreteIndex).matches()) { + + // take into account aliases + List aliasMetadata = aliases.get(concreteIndex); + boolean matchesAlias = false; + if (pattern != null && aliasMetadata != null) { + for (AliasMetaData aliasMeta : aliasMetadata) { + if (pattern.matcher(aliasMeta.alias()).matches()) { + matchesAlias = true; + break; + } + } + } + + if (pattern == null || matchesAlias || pattern.matcher(concreteIndex).matches()) { IndexResolution getIndexResult = buildGetIndexResult(concreteIndex, concreteIndex, indexMappings.value); if (getIndexResult.isValid()) { results.add(getIndexResult.get()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java index e5c80197296..68cfefe7fb5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.plan.logical.command.sys; +import org.apache.lucene.util.Counter; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; @@ -20,6 +21,7 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypes; import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.util.StringUtils; import java.sql.DatabaseMetaData; import java.util.ArrayList; @@ -101,40 +103,63 @@ public class SysColumns extends Command { String cluster = session.indexResolver().clusterName(); // bail-out early if the catalog is present but differs - if (Strings.hasText(catalog) && !cluster.equals(catalog)) { + if (Strings.hasText(catalog) && cluster.equals(catalog) == false) { listener.onResponse(Rows.empty(output)); return; } + // save original index name (as the pattern can contain special chars) + String indexName = index != null ? 
index : (pattern != null ? StringUtils.likeToUnescaped(pattern.pattern(),
+                pattern.escape()) : "");
         String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*");
         String regex = pattern != null ? pattern.asJavaRegex() : null;
 
         Pattern columnMatcher = columnPattern != null ? Pattern.compile(columnPattern.asJavaRegex()) : null;
 
-        session.indexResolver().resolveAsSeparateMappings(idx, regex, ActionListener.wrap(esIndices -> {
-            List<List<?>> rows = new ArrayList<>();
-            for (EsIndex esIndex : esIndices) {
-                fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, mode);
-            }
+        // special case for '%' (translated to *)
+        if ("*".equals(idx)) {
+            session.indexResolver().resolveAsSeparateMappings(idx, regex, ActionListener.wrap(esIndices -> {
+                List<List<?>> rows = new ArrayList<>();
+                for (EsIndex esIndex : esIndices) {
+                    fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, mode);
+                }
 
-            listener.onResponse(Rows.of(output, rows));
-        }, listener::onFailure));
+                listener.onResponse(Rows.of(output, rows));
+            }, listener::onFailure));
+        }
+        // otherwise use a merged mapping
+        else {
+            session.indexResolver().resolveAsMergedMapping(idx, regex, ActionListener.wrap(r -> {
+                List<List<?>> rows = new ArrayList<>();
+                // populate the data only when a target is found
+                if (r.isValid() == true) {
+                    EsIndex esIndex = r.get();
+                    fillInRows(cluster, indexName, esIndex.mapping(), null, rows, columnMatcher, mode);
+                }
+
+                listener.onResponse(Rows.of(output, rows));
+            }, listener::onFailure));
+        }
     }
 
     static void fillInRows(String clusterName, String indexName, Map<String, EsField> mapping, String prefix,
             List<List<?>> rows, Pattern columnMatcher, Mode mode) {
-        int pos = 0;
+        fillInRows(clusterName, indexName, mapping, prefix, rows, columnMatcher, Counter.newCounter(), mode);
+    }
+
+    private static void fillInRows(String clusterName, String indexName, Map<String, EsField> mapping, String prefix,
+            List<List<?>> rows, Pattern columnMatcher, Counter position, Mode mode) {
         boolean isOdbcClient = mode == Mode.ODBC;
         for (Map.Entry<String, EsField> entry : mapping.entrySet()) {
-            pos++; // JDBC is 1-based so we start with 1 here
+            position.addAndGet(1); // JDBC is 1-based so we start with 1 here
 
             String name = entry.getKey();
             name = prefix != null ? prefix + "." + name : name;
             EsField field = entry.getValue();
             DataType type = field.getDataType();
 
-            // skip the nested, object and unsupported types for JDBC and ODBC
-            if (type.isPrimitive() || false == Mode.isDriver(mode)) {
+            // skip the nested, object and unsupported types
+            if (type.isPrimitive()) {
                 if (columnMatcher == null || columnMatcher.matcher(name).matches()) {
                     rows.add(asList(clusterName,
                             // schema is not supported
@@ -162,7 +187,7 @@ public class SysColumns extends Command {
                             // char octet length
                             type.isString() || type == DataType.BINARY ?
type.size : null, // position - pos, + (int) position.get(), "YES", null, null, @@ -173,8 +198,9 @@ public class SysColumns extends Command { )); } } - if (field.getProperties() != null) { - fillInRows(clusterName, indexName, field.getProperties(), name, rows, columnMatcher, mode); + // skip nested fields + if (field.getProperties() != null && type != DataType.NESTED) { + fillInRows(clusterName, indexName, field.getProperties(), name, rows, columnMatcher, position, mode); } } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java index d2e8d3badf6..adfd3996e9f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java @@ -239,6 +239,34 @@ public final class StringUtils { return wildcard.toString(); } + public static String likeToUnescaped(String pattern, char escape) { + StringBuilder wildcard = new StringBuilder(pattern.length()); + + boolean escaped = false; + for (int i = 0; i < pattern.length(); i++) { + char curr = pattern.charAt(i); + + if (escaped == false && curr == escape && escape != 0) { + escaped = true; + } else { + if (escaped == true && (curr == '%' || curr == '_' || curr == escape)) { + wildcard.append(curr); + } else { + if (escaped) { + wildcard.append(escape); + } + wildcard.append(curr); + } + escaped = false; + } + } + // corner-case when the escape char is the last char + if (escaped == true) { + wildcard.append(escape); + } + return wildcard.toString(); + } + public static String toString(SearchSourceBuilder source) { try (XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true)) { source.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index 5d3e9d8a771..bb4fb02ea7e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -5,21 +5,58 @@ */ package org.elasticsearch.xpack.sql.plan.logical.command.sys; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.TestUtils; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexInfo; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import 
org.elasticsearch.xpack.sql.stats.Metrics; +import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.type.TypesTests; import java.sql.Types; import java.util.ArrayList; import java.util.List; +import java.util.Map; +import java.util.function.Consumer; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.elasticsearch.action.ActionListener.wrap; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SysColumnsTests extends ESTestCase { + static final String CLUSTER_NAME = "cluster"; + + private final SqlParser parser = new SqlParser(); + private final Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json", true); + private final IndexInfo index = new IndexInfo("test_emp", IndexType.INDEX); + private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); + + public void testSysColumns() { List> rows = new ArrayList<>(); SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, randomValueOtherThanMany(Mode::isDriver, () -> randomFrom(Mode.values()))); - assertEquals(17, rows.size()); + // nested fields are ignored + assertEquals(13, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -54,81 +91,57 @@ public class SysColumnsTests extends ESTestCase { assertEquals(8, bufferLength(row)); row = rows.get(5); - assertEquals("unsupported", name(row)); - assertEquals(Types.OTHER, sqlType(row)); - assertEquals(null, radix(row)); - assertEquals(0, bufferLength(row)); - - row = rows.get(6); - assertEquals("some", name(row)); - assertEquals(Types.STRUCT, sqlType(row)); - assertEquals(null, radix(row)); - assertEquals(-1, bufferLength(row)); - - row = rows.get(7); - assertEquals("some.dotted", name(row)); - assertEquals(Types.STRUCT, sqlType(row)); - assertEquals(null, radix(row)); - assertEquals(-1, bufferLength(row)); - - row = rows.get(8); assertEquals("some.dotted.field", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - - row = rows.get(9); + + row = rows.get(6); assertEquals("some.string", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(10); + row = rows.get(7); assertEquals("some.string.normalized", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(11); + row = rows.get(8); assertEquals("some.string.typical", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(12); + row = rows.get(9); assertEquals("some.ambiguous", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(13); + row = rows.get(10); assertEquals("some.ambiguous.one", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(14); + row = rows.get(11); assertEquals("some.ambiguous.two", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); 
assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(15); + row = rows.get(12); assertEquals("some.ambiguous.normalized", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - - row = rows.get(16); - assertEquals("foo_type", name(row)); - assertEquals(Types.OTHER, sqlType(row)); - assertEquals(null, radix(row)); - assertEquals(0, bufferLength(row)); } public void testSysColumnsInOdbcMode() { List> rows = new ArrayList<>(); - SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, + SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, Mode.ODBC); assertEquals(13, rows.size()); assertEquals(24, rows.get(0).size()); @@ -263,7 +276,7 @@ public class SysColumnsTests extends ESTestCase { public void testSysColumnsInJdbcMode() { List> rows = new ArrayList<>(); - SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, + SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, Mode.JDBC); assertEquals(13, rows.size()); assertEquals(24, rows.get(0).size()); @@ -431,4 +444,88 @@ public class SysColumnsTests extends ESTestCase { private static Object sqlDataTypeSub(List list) { return list.get(14); } -} + + public void testSysColumnsNoArg() throws Exception { + executeCommand("SYS COLUMNS", emptyList(), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + // no index specified + assertEquals("", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + // no index specified + assertEquals("", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + public void testSysColumnsWithCatalogWildcard() throws Exception { + executeCommand("SYS COLUMNS CATALOG 'cluster' TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + public void testSysColumnsWithMissingCatalog() throws Exception { + executeCommand("SYS COLUMNS TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + public void testSysColumnsWithNullCatalog() throws Exception { + executeCommand("SYS COLUMNS CATALOG ? 
TABLE LIKE 'test' LIKE '%'", singletonList(new SqlTypedParamValue("keyword", null)), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + @SuppressWarnings({ "unchecked" }) + private void executeCommand(String sql, List params, Consumer consumer, Map mapping) + throws Exception { + Tuple tuple = sql(sql, params, mapping); + + IndexResolver resolver = tuple.v2().indexResolver(); + + EsIndex test = new EsIndex("test", mapping); + + doAnswer(invocation -> { + ((ActionListener) invocation.getArguments()[2]).onResponse(IndexResolution.valid(test)); + return Void.TYPE; + }).when(resolver).resolveAsMergedMapping(any(), any(), any()); + + tuple.v1().execute(tuple.v2(), wrap(consumer::accept, ex -> fail(ex.getMessage()))); + } + + private Tuple sql(String sql, List params, Map mapping) { + EsIndex test = new EsIndex("test", mapping); + Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), IndexResolution.valid(test), + new Verifier(new Metrics())); + Command cmd = (Command) analyzer.analyze(parser.createStatement(sql, params), true); + + IndexResolver resolver = mock(IndexResolver.class); + when(resolver.clusterName()).thenReturn(CLUSTER_NAME); + + SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); + return new Tuple<>(cmd, session); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java deleted file mode 100644 index 110c320d679..00000000000 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.sql.plan.logical.command.sys; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.TestUtils; -import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; -import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier; -import org.elasticsearch.xpack.sql.analysis.index.EsIndex; -import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; -import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; -import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; -import org.elasticsearch.xpack.sql.parser.SqlParser; -import org.elasticsearch.xpack.sql.plan.logical.command.Command; -import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.stats.Metrics; -import org.elasticsearch.xpack.sql.type.DataType; -import org.elasticsearch.xpack.sql.type.EsField; -import org.elasticsearch.xpack.sql.type.TypesTests; - -import java.util.List; -import java.util.Map; - -import static java.util.Arrays.asList; -import static java.util.Collections.singletonList; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class SysParserTests extends ESTestCase { - - private final SqlParser parser = new SqlParser(); - private final Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json", true); - - @SuppressWarnings({ "rawtypes", "unchecked" }) - private Tuple sql(String sql) { - EsIndex test = new EsIndex("test", mapping); - Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), IndexResolution.valid(test), - new Verifier(new Metrics())); - Command cmd = (Command) analyzer.analyze(parser.createStatement(sql), true); - - IndexResolver resolver = mock(IndexResolver.class); - when(resolver.clusterName()).thenReturn("cluster"); - - doAnswer(invocation -> { - ((ActionListener) invocation.getArguments()[2]).onResponse(singletonList(test)); - return Void.TYPE; - }).when(resolver).resolveAsSeparateMappings(any(), any(), any()); - - SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); - return new Tuple<>(cmd, session); - } - - public void testSysTypes() { - Command cmd = sql("SYS TYPES").v1(); - - List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "FLOAT", "DOUBLE", "SCALED_FLOAT", - "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATE", "TIME", "DATETIME", - "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", - "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", - "INTERVAL_HOUR_TO_MINUTE", "INTERVAL_HOUR_TO_SECOND", "INTERVAL_MINUTE_TO_SECOND", - "UNSUPPORTED", "OBJECT", "NESTED"); - - cmd.execute(null, ActionListener.wrap(r -> { - assertEquals(19, r.columnCount()); - assertEquals(DataType.values().length, r.size()); - assertFalse(r.schema().types().contains(DataType.NULL)); - // test numeric as signed - assertFalse(r.column(9, Boolean.class)); - // make sure precision is returned as boolean (not int) - assertFalse(r.column(10, Boolean.class)); - // no auto-increment - assertFalse(r.column(11, Boolean.class)); - - for (int i = 0; i < r.size(); i++) { - assertEquals(names.get(i), r.column(0)); - r.advanceRow(); - } - - }, ex -> fail(ex.getMessage()))); - } - - 
public void testSysColsNoArgs() { - runSysColumns("SYS COLUMNS"); - } - - public void testSysColumnEmptyCatalog() { - Tuple sql = sql("SYS COLUMNS CATALOG '' TABLE LIKE '%' LIKE '%'"); - - sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { - assertEquals(24, r.columnCount()); - assertEquals(22, r.size()); - }, ex -> fail(ex.getMessage()))); - } - - public void testSysColsTableOnlyCatalog() { - Tuple sql = sql("SYS COLUMNS CATALOG 'catalog'"); - - sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { - assertEquals(24, r.columnCount()); - assertEquals(0, r.size()); - }, ex -> fail(ex.getMessage()))); - } - - public void testSysColsTableOnlyPattern() { - runSysColumns("SYS COLUMNS TABLE LIKE 'test'"); - } - - public void testSysColsColOnlyPattern() { - runSysColumns("SYS COLUMNS LIKE '%'"); - } - - public void testSysColsTableAndColsPattern() { - runSysColumns("SYS COLUMNS TABLE LIKE 'test' LIKE '%'"); - } - - - private void runSysColumns(String commandVariation) { - Tuple sql = sql(commandVariation); - List names = asList("bool", - "int", - "text", - "keyword", - "unsupported", - "date", - "some", - "some.dotted", - "some.dotted.field", - "some.string", - "some.string.normalized", - "some.string.typical", - "some.ambiguous", - "some.ambiguous.one", - "some.ambiguous.two", - "some.ambiguous.normalized", - "dep", - "dep.dep_name", - "dep.dep_id", - "dep.dep_id.keyword", - "dep.end_date", - "dep.start_date"); - - sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { - assertEquals(24, r.columnCount()); - assertEquals(22, r.size()); - - for (int i = 0; i < r.size(); i++) { - assertEquals("cluster", r.column(0)); - assertNull(r.column(1)); - assertEquals("test", r.column(2)); - assertEquals(names.get(i), r.column(3)); - r.advanceRow(); - } - - }, ex -> fail(ex.getMessage()))); - } -} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java index 19a544c14e5..29cbb9b985f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.test.ESTestCase; import static org.elasticsearch.xpack.sql.util.StringUtils.likeToJavaPattern; import static org.elasticsearch.xpack.sql.util.StringUtils.likeToLuceneWildcard; +import static org.elasticsearch.xpack.sql.util.StringUtils.likeToUnescaped; public class LikeConversionTests extends ESTestCase { @@ -20,6 +21,10 @@ public class LikeConversionTests extends ESTestCase { return likeToLuceneWildcard(pattern, '|'); } + private static String unescape(String pattern) { + return likeToUnescaped(pattern, '|'); + } + public void testNoRegex() { assertEquals("^fooBar$", regex("fooBar")); } @@ -103,4 +108,25 @@ public class LikeConversionTests extends ESTestCase { public void testWildcardIgnoreDoubleEscapedButSkipEscapingOfSql() { assertEquals("foo\\\\\\*bar\\\\?\\?", wildcard("foo\\*bar\\_?")); } -} + + public void testUnescapeLiteral() { + assertEquals("foo", unescape("foo")); + } + + public void testUnescapeEscaped() { + assertEquals("foo_bar", unescape("foo|_bar")); + } + + public void testUnescapeEscapedEscape() { + assertEquals("foo|_bar", unescape("foo||_bar")); + } + + public void testUnescapeLastCharEscape() { + assertEquals("foo_bar|", unescape("foo|_bar|")); + } + + public void testUnescapeMultipleEscapes() { + assertEquals("foo|_bar|", 
unescape("foo|||_bar||")); + } + +} \ No newline at end of file From fdc1bdd4d3f33dc07008dcd15d8e9d806a1865a4 Mon Sep 17 00:00:00 2001 From: Ed Savage <32410745+edsavage@users.noreply.github.com> Date: Tue, 9 Apr 2019 16:47:21 +0100 Subject: [PATCH 07/60] [ML][TEST] Fix randomly failing HLRC test (#40973) Made changes to ensure that unique IDs are generated for model snapshots used by the deleteExpiredDataTest test in the MachineLearningIT suite. Previously a sleep of 1s was performed between jobs under the assumption that this would be sufficient to guarantee that the timestamps used in the composition of the snapshot IDs would be different. The new approach is to wait on the condition that the old and new timestamps are in fact different (to 1s resolution). --- .../elasticsearch/client/MachineLearningIT.java | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 07d7187fd1d..f7b7b148f66 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -878,6 +878,18 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { waitForJobToClose(jobId); + long prevJobTimeStamp = System.currentTimeMillis() / 1000; + + // Check that the current timestamp component, in seconds, differs from previously. + // Note that we used to use an 'awaitBusy(() -> false, 1, TimeUnit.SECONDS);' + // for the same purpose but the new approach... + // a) explicitly checks that the timestamps, in seconds, are actually different and + // b) is slightly more efficient since we may not need to wait an entire second for the timestamp to increment + assertBusy(() -> { + long timeNow = System.currentTimeMillis() / 1000; + assertFalse(prevJobTimeStamp >= timeNow); + }); + // Update snapshot timestamp to force it out of snapshot retention window long oneDayAgo = nowMillis - TimeValue.timeValueHours(24).getMillis() - 1; updateModelSnapshotTimestamp(jobId, String.valueOf(oneDayAgo)); @@ -1418,6 +1430,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { } private void updateModelSnapshotTimestamp(String jobId, String timestamp) throws Exception { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); GetModelSnapshotsRequest getModelSnapshotsRequest = new GetModelSnapshotsRequest(jobId); @@ -1435,9 +1448,6 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { UpdateRequest updateSnapshotRequest = new UpdateRequest(".ml-anomalies-" + jobId, "_doc", documentId); updateSnapshotRequest.doc(snapshotUpdate.getBytes(StandardCharsets.UTF_8), XContentType.JSON); highLevelClient().update(updateSnapshotRequest, RequestOptions.DEFAULT); - - // Wait a second to ensure subsequent model snapshots will have a different ID (it depends on epoch seconds) - awaitBusy(() -> false, 1, TimeUnit.SECONDS); } From e71db0531eb763ce91a82e8c7f1820719cc7d86c Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 9 Apr 2019 19:40:01 +0200 Subject: [PATCH 08/60] Fix Race in AsyncTwoPhaseIndexerTests.testStateMachine (#40947) (#41013) * The step is incremented by the listner in `org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexerTests.MockIndexer#onFinish` after isFinished is set to true, but the test only waited for `isFinished`, fixed 
From e71db0531eb763ce91a82e8c7f1820719cc7d86c Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Tue, 9 Apr 2019 19:40:01 +0200
Subject: [PATCH 08/60] Fix Race in AsyncTwoPhaseIndexerTests.testStateMachine (#40947) (#41013)

* The step is incremented by the listener in
  `org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexerTests.MockIndexer#onFinish`
  after isFinished is set to true, but the test only waited for
  `isFinished`; fixed by setting `isFinished` last
* Also made `step` volatile since we are reading it from a different
  thread than the one incrementing it
* Closes #40946
---
 .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
index 39a8807c2d5..f7f97288b23 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
@@ -39,7 +39,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase {
         private final CountDownLatch latch;
 
         // test the execution order
-        private int step;
+        private volatile int step;
 
         protected MockIndexer(Executor executor, AtomicReference<IndexerState> initialState, Integer initialPosition,
                               CountDownLatch latch) {
@@ -113,8 +113,8 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase {
         protected void onFinish(ActionListener<?> listener) {
             assertThat(step, equalTo(4));
             ++step;
-            isFinished.set(true);
             listener.onResponse(null);
+            isFinished.set(true);
         }
 
         @Override
@@ -206,8 +206,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase {
         }
     }
 
-    @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/40946")
-    public void testStateMachine() throws InterruptedException {
+    public void testStateMachine() throws Exception {
         AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED);
         final ExecutorService executor = Executors.newFixedThreadPool(1);
         isFinished.set(false);

From ebba9393c1abca0305ecbb2d883b2d2b1a56acc8 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 9 Apr 2019 13:50:47 -0400
Subject: [PATCH 09/60] Fix unsafe publication of invalid license enforcer (#40985)

The invalid license enforcer is exposed to the cluster state update
thread (via the license state listener) before the constructor has
finished. This violates the JLS requirements for safe publication of an
object, and means there is a concurrency bug lurking here. This commit
addresses this by avoiding publication of the invalid license enforcer
before the constructor has returned.
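
To make the publication hazard concrete, here is a distilled illustration
(hypothetical types, not the classes changed below): handing `this` to a
listener registry inside the constructor publishes the object before
construction completes, so a callback arriving on another thread can observe
fields that are not yet assigned.

    class Enforcer {
        private final DatafeedManager datafeedManager;

        Enforcer(LicenseState licenseState, DatafeedManager datafeedManager) {
            licenseState.addListener(this::licenseStateChanged); // 'this' escapes here
            this.datafeedManager = datafeedManager;              // assigned too late for an early callback
        }

        void licenseStateChanged() {
            datafeedManager.stopAllDatafeeds(); // may throw NullPointerException mid-construction
        }
    }

Note that merely swapping the two constructor statements would not be a
complete fix: the visibility guarantees for final fields only take effect
once the constructor finishes, which is why the change below moves listener
registration into a separate method invoked after construction.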
---
 .../xpack/ml/InvalidLicenseEnforcer.java | 24 ++++++++++++++++---
 .../xpack/ml/MachineLearning.java        |  6 +++--
 2 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java
index 35ec721a947..bff85d691b4 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java
@@ -3,17 +3,19 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
+
 package org.elasticsearch.xpack.ml;
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.license.LicenseStateListener;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.ml.datafeed.DatafeedManager;
 import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager;
 
-public class InvalidLicenseEnforcer {
+public class InvalidLicenseEnforcer implements LicenseStateListener {
 
     private static final Logger logger = LogManager.getLogger(InvalidLicenseEnforcer.class);
 
@@ -22,17 +24,32 @@ public class InvalidLicenseEnforcer {
     private final DatafeedManager datafeedManager;
     private final AutodetectProcessManager autodetectProcessManager;
 
+    private volatile boolean licenseStateListenerRegistered;
+
     InvalidLicenseEnforcer(XPackLicenseState licenseState, ThreadPool threadPool,
                            DatafeedManager datafeedManager, AutodetectProcessManager autodetectProcessManager) {
         this.threadPool = threadPool;
         this.licenseState = licenseState;
         this.datafeedManager = datafeedManager;
         this.autodetectProcessManager = autodetectProcessManager;
-        licenseState.addListener(this::closeJobsAndDatafeedsIfLicenseExpired);
     }
 
-    private void closeJobsAndDatafeedsIfLicenseExpired() {
+    void listenForLicenseStateChanges() {
+        /*
+         * Registering this as a listener cannot be done in the constructor because otherwise it would be unsafe publication of this.
+         * That is, it would expose this to another thread before the constructor had finished. Therefore, we have a dedicated method
+         * to register the listener that is invoked after the constructor has returned.
+         */
+        assert licenseStateListenerRegistered == false;
+        licenseState.addListener(this);
+        licenseStateListenerRegistered = true;
+    }
+
+    @Override
+    public void licenseStateChanged() {
+        assert licenseStateListenerRegistered;
         if (licenseState.isMachineLearningAllowed() == false) {
+            // if the license has expired, close jobs and datafeeds
             threadPool.generic().execute(new AbstractRunnable() {
                 @Override
                 public void onFailure(Exception e) {
@@ -47,4 +64,5 @@ public class InvalidLicenseEnforcer {
             });
         }
     }
+
 }
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
index 3d59a5fb45a..b69e7b786a7 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
@@ -461,8 +461,10 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu
         MlLifeCycleService mlLifeCycleService = new MlLifeCycleService(environment, clusterService, datafeedManager,
                 autodetectProcessManager, memoryTracker);
 
-        // This object's constructor attaches to the license state, so there's no need to retain another reference to it
-        new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedManager, autodetectProcessManager);
+        // this object registers as a license state listener, and is never removed, so there's no need to retain another reference to it
+        final InvalidLicenseEnforcer enforcer =
+            new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedManager, autodetectProcessManager);
+        enforcer.listenForLicenseStateChanges();
 
         // run node startup tasks
         autodetectProcessManager.onNodeStartup();
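
One trade-off of the two-step initialization above is that every caller must
remember to invoke listenForLicenseStateChanges() after construction (the
assertions on licenseStateListenerRegistered only catch a mistake at runtime,
and only in builds with assertions enabled). A common alternative idiom,
sketched here rather than taken from the patch, is a static factory that
performs both steps so the registration cannot be forgotten:

    static InvalidLicenseEnforcer create(XPackLicenseState licenseState, ThreadPool threadPool,
                                         DatafeedManager datafeedManager,
                                         AutodetectProcessManager autodetectProcessManager) {
        // construct fully first; 'this' has not escaped yet
        InvalidLicenseEnforcer enforcer =
            new InvalidLicenseEnforcer(licenseState, threadPool, datafeedManager, autodetectProcessManager);
        // only now publish the fully constructed object to the license state
        enforcer.listenForLicenseStateChanges();
        return enforcer;
    }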
From 321f93c4f95255536ae85cd92f9a15a79e48674e Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 9 Apr 2019 14:24:54 -0400
Subject: [PATCH 10/60] Wait for all listeners in checkpoint listeners test

It could be that we try to shut down the executor pool before all the
listeners have been invoked. A listener can remain uninvoked if it timed
out and was still in the process of being notified of the timeout on the
executor. If we shut down at that point, such a listener is met with a
rejected execution exception. To address this, we now wait until all
listeners have been notified (or timed out) before proceeding to shut
down the executor.

Relates #40970
---
 .../index/shard/GlobalCheckpointListenersTests.java | 21 +++++++++++--------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java
index 59c3553d25f..d71bade29a3 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java
@@ -431,7 +431,7 @@ public class GlobalCheckpointListenersTests extends ESTestCase {
         assertThat(count.get(), equalTo(numberOfListeners));
     }
 
-    public void testConcurrency() throws BrokenBarrierException, InterruptedException {
+    public void testConcurrency() throws Exception {
         final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, 8));
         final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, scheduler, logger);
         final AtomicLong globalCheckpoint = new AtomicLong(NO_OPS_PERFORMED);
@@ -470,11 +470,12 @@ public class GlobalCheckpointListenersTests extends ESTestCase {
                 // sometimes this will notify the listener immediately
                 globalCheckpointListeners.add(
                         globalCheckpoint.get(),
-                        maybeMultipleInvocationProtectingListener((g, e) -> {
-                            if (invocation.compareAndSet(false, true) == false) {
-                                throw new IllegalStateException("listener invoked twice");
-                            }
-                        }),
+                        maybeMultipleInvocationProtectingListener(
+                                (g, e) -> {
+                                    if (invocation.compareAndSet(false, true) == false) {
+                                        throw new IllegalStateException("listener invoked twice");
+                                    }
+                                }),
                         randomBoolean() ?
null : TimeValue.timeValueNanos(randomLongBetween(1, TimeUnit.MICROSECONDS.toNanos(1)))); } // synchronize ending with the updating thread and the main test thread @@ -491,11 +492,13 @@ public class GlobalCheckpointListenersTests extends ESTestCase { globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint.incrementAndGet()); } assertThat(globalCheckpointListeners.pendingListeners(), equalTo(0)); - executor.shutdown(); - executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + // wait for all the listeners to be notified for (final AtomicBoolean invocation : invocations) { - assertTrue(invocation.get()); + assertBusy(() -> assertTrue(invocation.get())); } + // now shutdown + executor.shutdown(); + assertTrue(executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS)); updatingThread.join(); listenersThread.join(); } From 1287c7d91f8610cd4f1b480e4a07f11649f7754a Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 9 Apr 2019 11:52:50 -0700 Subject: [PATCH 11/60] [Backport] Replace usages RandomizedTestingTask with built-in Gradle Test (#40978) (#40993) * Replace usages RandomizedTestingTask with built-in Gradle Test (#40978) This commit replaces the existing RandomizedTestingTask and supporting code with Gradle's built-in JUnit support via the Test task type. Additionally, the previous workaround to disable all tasks named "test" and create new unit testing tasks named "unitTest" has been removed such that the "test" task now runs unit tests as per the normal Gradle Java plugin conventions. (cherry picked from commit 323f312bbc829a63056a79ebe45adced5099f6e6) * Fix forking JVM runner * Don't bump shadow plugin version --- benchmarks/build.gradle | 2 +- buildSrc/build.gradle | 12 +- .../junit4/BalancersConfiguration.groovy | 53 --- .../junit4/ListenersConfiguration.groovy | 25 -- .../junit4/RandomizedTestingPlugin.groovy | 60 --- .../junit4/RandomizedTestingTask.groovy | 330 ---------------- .../junit4/SlowTestsConfiguration.groovy | 14 - .../StackTraceFiltersConfiguration.groovy | 14 - .../junit4/TestLoggingConfiguration.groovy | 43 -- .../gradle/junit4/TestProgressLogger.groovy | 193 --------- .../gradle/junit4/TestReportLogger.groovy | 369 ------------------ .../elasticsearch/gradle/BuildPlugin.groovy | 264 +++++++------ .../gradle}/LoggingOutputStream.groovy | 2 +- .../gradle/test/RestIntegTestTask.groovy | 75 +++- .../test/StandaloneRestTestPlugin.groovy | 47 ++- .../gradle/test/StandaloneTestPlugin.groovy | 26 +- .../vagrant/TapLoggerOutputStream.groovy | 2 +- .../vagrant/VagrantLoggerOutputStream.groovy | 2 +- .../precommit/TestingConventionsTasks.java | 39 -- .../test/ErrorReportingTestListener.java | 266 +++++++++++++ .../testfixtures/TestFixturesPlugin.java | 28 +- ...carrotsearch.randomized-testing.properties | 1 - .../precommit/TestingConventionsTasksIT.java | 7 +- .../testKit/elasticsearch.build/build.gradle | 2 +- .../testKit/testingConventions/build.gradle | 14 +- client/benchmark/build.gradle | 2 +- .../build.gradle | 2 +- client/test/build.gradle | 2 +- .../archives/integ-test-zip/build.gradle | 2 +- .../tools/java-version-checker/build.gradle | 2 +- distribution/tools/plugin-cli/build.gradle | 2 +- libs/cli/build.gradle | 2 +- .../org/elasticsearch/bootstrap/JarHell.java | 27 +- libs/plugin-classloader/build.gradle | 2 +- modules/lang-painless/build.gradle | 4 +- modules/lang-painless/spi/build.gradle | 2 +- modules/reindex/build.gradle | 6 +- modules/transport-netty4/build.gradle | 2 +- plugins/discovery-ec2/build.gradle | 2 +- 
plugins/discovery-gce/build.gradle | 2 +- .../examples/custom-suggester/build.gradle | 2 +- .../examples/painless-whitelist/build.gradle | 2 +- plugins/examples/rest-handler/build.gradle | 4 +- .../script-expert-scoring/build.gradle | 2 +- plugins/repository-hdfs/build.gradle | 9 +- plugins/repository-s3/build.gradle | 5 +- qa/die-with-dignity/build.gradle | 6 +- qa/evil-tests/build.gradle | 2 +- qa/full-cluster-restart/build.gradle | 2 +- qa/logging-config/build.gradle | 4 +- qa/mixed-cluster/build.gradle | 2 +- qa/multi-cluster-search/build.gradle | 2 +- qa/rolling-upgrade/build.gradle | 2 +- qa/unconfigured-node-name/build.gradle | 2 +- qa/vagrant/build.gradle | 2 +- qa/verify-version-constants/build.gradle | 2 +- qa/wildfly/build.gradle | 2 +- rest-api-spec/build.gradle | 2 +- server/build.gradle | 13 +- .../bootstrap/test-framework.policy | 19 + test/fixtures/hdfs-fixture/build.gradle | 2 +- test/fixtures/krb5kdc-fixture/build.gradle | 2 +- test/fixtures/old-elasticsearch/build.gradle | 2 +- test/framework/build.gradle | 5 +- .../bootstrap/BootstrapForTesting.java | 10 +- .../junit/listeners/ReproduceInfoPrinter.java | 22 +- x-pack/plugin/ccr/build.gradle | 37 +- x-pack/plugin/ccr/qa/build.gradle | 2 +- .../downgrade-to-basic-license/build.gradle | 6 +- .../plugin/ccr/qa/multi-cluster/build.gradle | 8 +- .../ccr/qa/non-compliant-license/build.gradle | 4 +- x-pack/plugin/ccr/qa/rest/build.gradle | 2 +- x-pack/plugin/ccr/qa/restart/build.gradle | 6 +- x-pack/plugin/ccr/qa/security/build.gradle | 4 +- x-pack/plugin/core/build.gradle | 5 +- x-pack/plugin/ilm/qa/build.gradle | 2 +- .../plugin/ilm/qa/multi-cluster/build.gradle | 6 +- x-pack/plugin/ilm/qa/rest/build.gradle | 2 +- x-pack/plugin/ml/build.gradle | 13 +- x-pack/plugin/monitoring/build.gradle | 16 +- x-pack/plugin/security/build.gradle | 2 +- x-pack/plugin/security/cli/build.gradle | 2 +- x-pack/plugin/security/qa/build.gradle | 2 +- x-pack/plugin/sql/build.gradle | 9 +- x-pack/plugin/sql/jdbc/build.gradle | 2 +- x-pack/plugin/sql/qa/build.gradle | 2 +- x-pack/plugin/sql/qa/security/build.gradle | 4 +- x-pack/plugin/sql/sql-cli/build.gradle | 2 +- x-pack/plugin/watcher/build.gradle | 2 +- x-pack/qa/evil-tests/build.gradle | 2 +- x-pack/qa/full-cluster-restart/build.gradle | 2 +- .../build.gradle | 2 +- x-pack/qa/rolling-upgrade-basic/build.gradle | 2 +- .../build.gradle | 62 +-- x-pack/qa/rolling-upgrade/build.gradle | 2 +- .../third-party/active-directory/build.gradle | 2 +- x-pack/test/idp-fixture/build.gradle | 2 +- x-pack/test/smb-fixture/build.gradle | 2 +- 98 files changed, 747 insertions(+), 1551 deletions(-) delete mode 100644 buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/BalancersConfiguration.groovy delete mode 100644 buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/ListenersConfiguration.groovy delete mode 100644 buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy delete mode 100644 buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy delete mode 100644 buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/SlowTestsConfiguration.groovy delete mode 100644 buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/StackTraceFiltersConfiguration.groovy delete mode 100644 buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy delete mode 100644 buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy delete mode 100644 
buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy rename buildSrc/src/main/groovy/{com/carrotsearch/gradle/junit4 => org/elasticsearch/gradle}/LoggingOutputStream.groovy (97%) create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java delete mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/carrotsearch.randomized-testing.properties diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index cca396071c1..e85f9a56086 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -24,7 +24,7 @@ mainClassName = 'org.openjdk.jmh.Main' assemble.enabled = false archivesBaseName = 'elasticsearch-benchmarks' -unitTest.enabled = false +test.enabled = false dependencies { compile("org.elasticsearch:elasticsearch:${version}") { diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 9d25532d4ce..2175c2700d8 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -199,11 +199,11 @@ if (project != rootProject) { into localDownloads } - unitTest { + test { // The test task is configured to runtimeJava version, but build-tools doesn't support all of them, so test // with compiler instead on the ones that are too old. if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_10) { - jvm = "${project.compilerJavaHome}/bin/java" + executable = "${project.compilerJavaHome}/bin/java" } } @@ -215,8 +215,6 @@ if (project != rootProject) { } dependsOn setupLocalDownloads exclude "**/*Tests.class" - testClassesDirs = sourceSets.test.output.classesDirs - classpath = sourceSets.test.runtimeClasspath inputs.dir(file("src/testKit")) // tell BuildExamplePluginsIT where to find the example plugins systemProperty ( @@ -232,11 +230,7 @@ if (project != rootProject) { if (isLuceneSnapshot) { systemProperty 'test.lucene-snapshot-revision', isLuceneSnapshot[0][1] } - String defaultParallel = System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel) - if (defaultParallel == "auto") { - defaultParallel = Math.max(Runtime.getRuntime().availableProcessors(), 4) - } - maxParallelForks defaultParallel as Integer + maxParallelForks System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel.toString()) as Integer } check.dependsOn(integTest) diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/BalancersConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/BalancersConfiguration.groovy deleted file mode 100644 index 91355bf2494..00000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/BalancersConfiguration.groovy +++ /dev/null @@ -1,53 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.SuiteBalancer -import com.carrotsearch.ant.tasks.junit4.balancers.ExecutionTimeBalancer -import com.carrotsearch.ant.tasks.junit4.listeners.ExecutionTimesReport -import org.apache.tools.ant.types.FileSet - -class BalancersConfiguration { - // parent task, so executionTime can register an additional listener - RandomizedTestingTask task - List balancers = new ArrayList<>() - - void executionTime(Map properties) { - ExecutionTimeBalancer balancer = new ExecutionTimeBalancer() - - FileSet fileSet = new FileSet() - Object filename = properties.remove('cacheFilename') - if (filename == null) { - throw new IllegalArgumentException('cacheFilename is required for executionTime balancer') - } - fileSet.setIncludes(filename.toString()) - - File cacheDir = task.project.projectDir - Object dir = 
properties.remove('cacheDir') - if (dir != null) { - cacheDir = new File(dir.toString()) - } - fileSet.setDir(cacheDir) - balancer.add(fileSet) - - int historySize = 10 - Object size = properties.remove('historySize') - if (size instanceof Integer) { - historySize = (Integer)size - } else if (size != null) { - throw new IllegalArgumentException('historySize must be an integer') - } - ExecutionTimesReport listener = new ExecutionTimesReport() - listener.setFile(new File(cacheDir, filename.toString())) - listener.setHistoryLength(historySize) - - if (properties.isEmpty() == false) { - throw new IllegalArgumentException('Unknown properties for executionTime balancer: ' + properties.keySet()) - } - - task.listenersConfig.listeners.add(listener) - balancers.add(balancer) - } - - void custom(SuiteBalancer balancer) { - balancers.add(balancer) - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/ListenersConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/ListenersConfiguration.groovy deleted file mode 100644 index 5fa5baa8ffb..00000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/ListenersConfiguration.groovy +++ /dev/null @@ -1,25 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener -import com.carrotsearch.ant.tasks.junit4.listeners.antxml.AntXmlReport - - -class ListenersConfiguration { - RandomizedTestingTask task - List listeners = new ArrayList<>() - - void junitReport(Map props) { - AntXmlReport reportListener = new AntXmlReport() - Object dir = props == null ? null : props.get('dir') - if (dir != null) { - reportListener.setDir(task.project.file(dir)) - } else { - reportListener.setDir(new File(task.project.buildDir, 'reports' + File.separator + "${task.name}Junit")) - } - listeners.add(reportListener) - } - - void custom(AggregatedEventListener listener) { - listeners.add(listener) - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy deleted file mode 100644 index 7d554386c39..00000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy +++ /dev/null @@ -1,60 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.JUnit4 -import org.gradle.api.Plugin -import org.gradle.api.Project -import org.gradle.api.tasks.TaskContainer - -class RandomizedTestingPlugin implements Plugin { - - void apply(Project project) { - String seed = setupSeed(project) - createUnitTestTask(project.tasks) - configureAnt(project.ant, seed) - } - - /** - * Pins the test seed at configuration time so it isn't different on every - * {@link RandomizedTestingTask} execution. This is useful if random - * decisions in one run of {@linkplain RandomizedTestingTask} influence the - * outcome of subsequent runs. Pinning the seed up front like this makes - * the reproduction line from one run be useful on another run. - */ - static String setupSeed(Project project) { - if (project.rootProject.ext.has('testSeed')) { - /* Skip this if we've already pinned the testSeed. It is important - * that this checks the rootProject so that we know we've only ever - * initialized one time. 
*/ - return project.rootProject.ext.testSeed - } - String testSeed = System.getProperty('tests.seed') - if (testSeed == null) { - long seed = new Random(System.currentTimeMillis()).nextLong() - testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT) - } - /* Set the testSeed on the root project first so other projects can use - * it during initialization. */ - project.rootProject.ext.testSeed = testSeed - project.rootProject.subprojects { - project.ext.testSeed = testSeed - } - - return testSeed - } - - static void createUnitTestTask(TaskContainer tasks) { - // only create a unitTest task if the `test` task exists as some project don't make use of it. - tasks.matching { it.name == "test" }.all { - // We don't want to run any tests with the Gradle test runner since we add our own randomized runner - it.enabled = false - RandomizedTestingTask unitTest = tasks.create('unitTest', RandomizedTestingTask) - unitTest.description = 'Runs unit tests with the randomized testing framework' - it.dependsOn unitTest - } - } - - static void configureAnt(AntBuilder ant, String seed) { - ant.project.addTaskDefinition('junit4:junit4', JUnit4.class) - ant.properties.put('tests.seed', seed) - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy deleted file mode 100644 index e5500d60093..00000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy +++ /dev/null @@ -1,330 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.ListenersList -import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener -import groovy.xml.NamespaceBuilder -import groovy.xml.NamespaceBuilderSupport -import org.apache.tools.ant.BuildException -import org.apache.tools.ant.DefaultLogger -import org.apache.tools.ant.Project -import org.apache.tools.ant.RuntimeConfigurable -import org.apache.tools.ant.UnknownElement -import org.elasticsearch.gradle.BuildPlugin -import org.gradle.api.DefaultTask -import org.gradle.api.InvalidUserDataException -import org.gradle.api.file.FileCollection -import org.gradle.api.file.FileTreeElement -import org.gradle.api.specs.Spec -import org.gradle.api.tasks.Input -import org.gradle.api.tasks.InputDirectory -import org.gradle.api.tasks.Optional -import org.gradle.api.tasks.TaskAction -import org.gradle.api.tasks.options.Option -import org.gradle.api.tasks.util.PatternFilterable -import org.gradle.api.tasks.util.PatternSet -import org.gradle.internal.logging.progress.ProgressLoggerFactory -import org.gradle.util.ConfigureUtil - -import javax.inject.Inject - -class RandomizedTestingTask extends DefaultTask { - - // TODO: change to "executable" to match gradle test params? 
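[Editor's note: the TODO above is resolved by this migration — the buildSrc/build.gradle hunk near the top of this patch replaces `jvm = "${project.compilerJavaHome}/bin/java"` with Gradle's standard `executable = ...`, and the seed-pinning javadoc from the deleted plugin reappears verbatim on BuildPlugin.setupSeed further down. A minimal sketch of the seed round-trip that makes a reproduction line portable between runs; runnable as a plain Groovy script, with illustrative names that are not part of the build:

    // Pin a random seed once and render it the way setupSeed does:
    // unsigned hex, upper-cased, suitable for pasting into -Dtests.seed=...
    long seed = new Random(System.currentTimeMillis()).nextLong()
    String token = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT)

    // parseUnsignedLong accepts the upper-case hex digits, so the token
    // recovers the original seed exactly on a later run.
    assert Long.parseUnsignedLong(token, 16) == seed
]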
- @Optional - @Input - String jvm = 'java' - - @Optional - @Input - File workingDir = new File(project.buildDir, 'testrun' + File.separator + name) - - @Optional - @Input - FileCollection classpath - - @Input - String parallelism = '1' - - @Input - FileCollection testClassesDirs - - @Optional - @Input - boolean haltOnFailure = true - - @Optional - @Input - boolean shuffleOnSlave = true - - @Optional - @Input - boolean enableAssertions = true - - @Optional - @Input - boolean enableSystemAssertions = true - - @Optional - @Input - boolean leaveTemporary = false - - @Optional - @Input - String ifNoTests = 'ignore' - - @Optional - @Input - String onNonEmptyWorkDirectory = 'fail' - - TestLoggingConfiguration testLoggingConfig = new TestLoggingConfiguration() - - BalancersConfiguration balancersConfig = new BalancersConfiguration(task: this) - ListenersConfiguration listenersConfig = new ListenersConfiguration(task: this) - - List jvmArgs = new ArrayList<>() - - @Optional - @Input - String argLine = null - - Map systemProperties = new HashMap<>() - Map environmentVariables = new HashMap<>() - PatternFilterable patternSet = new PatternSet() - - RandomizedTestingTask() { - outputs.upToDateWhen {false} // randomized tests are never up to date - listenersConfig.listeners.add(new TestProgressLogger(factory: getProgressLoggerFactory())) - listenersConfig.listeners.add(new TestReportLogger(logger: logger, config: testLoggingConfig)) - } - - @Inject - ProgressLoggerFactory getProgressLoggerFactory() { - throw new UnsupportedOperationException() - } - - void jvmArgs(Iterable arguments) { - jvmArgs.addAll(arguments) - } - - void jvmArg(String argument) { - jvmArgs.add(argument) - } - - void systemProperty(String property, Object value) { - systemProperties.put(property, value) - } - - void environment(String key, Object value) { - environmentVariables.put(key, value) - } - - void include(String... includes) { - this.patternSet.include(includes); - } - - void include(Iterable includes) { - this.patternSet.include(includes); - } - - void include(Spec includeSpec) { - this.patternSet.include(includeSpec); - } - - void include(Closure includeSpec) { - this.patternSet.include(includeSpec); - } - - void exclude(String... excludes) { - this.patternSet.exclude(excludes); - } - - void exclude(Iterable excludes) { - this.patternSet.exclude(excludes); - } - - void exclude(Spec excludeSpec) { - this.patternSet.exclude(excludeSpec); - } - - void exclude(Closure excludeSpec) { - this.patternSet.exclude(excludeSpec); - } - - @Input - void testLogging(Closure closure) { - ConfigureUtil.configure(closure, testLoggingConfig) - } - - @Input - void balancers(Closure closure) { - ConfigureUtil.configure(closure, balancersConfig) - } - - @Input - void listeners(Closure closure) { - ConfigureUtil.configure(closure, listenersConfig) - } - - @Option( - option = "tests", - description = "Sets test class or method name to be included. This is for IDEs. Use -Dtests.class and -Dtests.method" - ) - void setTestNameIncludePattern(String testNamePattern) { - // This is only implemented to give support for IDEs running tests. There are 3 patterns expected: - // * An exact test class and method - // * An exact test class - // * A package name prefix, ending with .* - // There is no way to distinguish the first two without looking at classes, so we use the rule - // that class names start with an uppercase letter... - // TODO: this doesn't work yet, but not sure why...intellij says it is using --tests, and this work from the command line... 
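[Editor's note: the three `--tests` patterns described in the comment above are disambiguated purely by the shape of the last dot-separated segment. A self-contained Groovy sketch of that heuristic (hypothetical helper name; asserts included so it can be run standalone):

    def splitTestPattern(String pattern) {
        String last = pattern.tokenize('.').last()
        if (last == '*' || Character.isUpperCase(last.charAt(0))) {
            return [pattern, null]  // package prefix or exact class: pass through unchanged
        }
        // lower-case last segment: treat it as a method on the preceding class
        return [pattern.substring(0, pattern.length() - last.length() - 1), last]
    }

    assert splitTestPattern('org.example.FooTests')         == ['org.example.FooTests', null]
    assert splitTestPattern('org.example.FooTests.testBar') == ['org.example.FooTests', 'testBar']
    assert splitTestPattern('org.example.*')                == ['org.example.*', null]
]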
- String[] parts = testNamePattern.split('\\.') - String lastPart = parts[parts.length - 1] - String classname - String methodname = null - if (lastPart.equals('*') || lastPart.charAt(0).isUpperCase()) { - // package name or class name, just pass through - classname = testNamePattern - } else { - // method name, need to separate - methodname = lastPart - classname = testNamePattern.substring(0, testNamePattern.length() - lastPart.length() - 1) - } - ant.setProperty('tests.class', classname) - if (methodname != null) { - ant.setProperty('tests.method', methodname) - } - } - - @TaskAction - void executeTests() { - Map attributes = [ - jvm: jvm, - parallelism: parallelism, - heartbeat: testLoggingConfig.slowTests.heartbeat, - dir: workingDir, - tempdir: new File(workingDir, 'temp'), - haltOnFailure: true, // we want to capture when a build failed, but will decide whether to rethrow later - shuffleOnSlave: shuffleOnSlave, - leaveTemporary: leaveTemporary, - ifNoTests: ifNoTests, - onNonEmptyWorkDirectory: onNonEmptyWorkDirectory, - newenvironment: true - ] - - DefaultLogger listener = null - ByteArrayOutputStream antLoggingBuffer = null - if (logger.isInfoEnabled() == false) { - // in info logging, ant already outputs info level, so we see everything - // but on errors or when debugging, we want to see info level messages - // because junit4 emits jvm output with ant logging - if (testLoggingConfig.outputMode == TestLoggingConfiguration.OutputMode.ALWAYS) { - // we want all output, so just stream directly - listener = new DefaultLogger( - errorPrintStream: System.err, - outputPrintStream: System.out, - messageOutputLevel: Project.MSG_INFO) - } else { - // we want to buffer the info, and emit it if the test fails - antLoggingBuffer = new ByteArrayOutputStream() - PrintStream stream = new PrintStream(antLoggingBuffer, true, "UTF-8") - listener = new DefaultLogger( - errorPrintStream: stream, - outputPrintStream: stream, - messageOutputLevel: Project.MSG_INFO) - } - project.ant.project.addBuildListener(listener) - } - - NamespaceBuilderSupport junit4 = NamespaceBuilder.newInstance(ant, 'junit4') - try { - junit4.junit4(attributes) { - classpath { - pathElement(path: classpath.asPath) - } - if (enableAssertions) { - jvmarg(value: '-ea') - } - if (enableSystemAssertions) { - jvmarg(value: '-esa') - } - for (String arg : jvmArgs) { - jvmarg(value: arg) - } - if (argLine != null) { - jvmarg(line: argLine) - } - testClassesDirs.each { testClassDir -> - fileset(dir: testClassDir) { - patternSet.getIncludes().each { include(name: it) } - patternSet.getExcludes().each { exclude(name: it) } - } - } - for (Map.Entry prop : systemProperties) { - if (prop.getKey().equals('tests.seed')) { - throw new InvalidUserDataException('Seed should be ' + - 'set on the project instead of a system property') - } - if (prop.getValue() instanceof Closure) { - sysproperty key: prop.getKey(), value: (prop.getValue() as Closure).call().toString() - } else { - sysproperty key: prop.getKey(), value: prop.getValue().toString() - } - } - systemProperty 'tests.seed', project.testSeed - for (Map.Entry envvar : environmentVariables) { - env key: envvar.getKey(), value: envvar.getValue().toString() - } - makeListeners() - } - } catch (BuildException e) { - if (antLoggingBuffer != null) { - logger.error('JUnit4 test failed, ant output was:') - logger.error(antLoggingBuffer.toString('UTF-8')) - } - if (haltOnFailure) { - throw e; - } - } - - if (listener != null) { - // remove the listener we added so other ant tasks dont have verbose 
logging! - project.ant.project.removeBuildListener(listener) - } - } - - static class ListenersElement extends UnknownElement { - AggregatedEventListener[] listeners - - ListenersElement() { - super('listeners') - setNamespace('junit4') - setQName('listeners') - } - - public void handleChildren(Object realThing, RuntimeConfigurable wrapper) { - assert realThing instanceof ListenersList - ListenersList list = (ListenersList)realThing - - for (AggregatedEventListener listener : listeners) { - list.addConfigured(listener) - } - } - } - - /** - * Makes an ant xml element for 'listeners' just as AntBuilder would, except configuring - * the element adds the already created children. - */ - def makeListeners() { - def context = ant.getAntXmlContext() - def parentWrapper = context.currentWrapper() - def parent = parentWrapper.getProxy() - UnknownElement element = new ListenersElement(listeners: listenersConfig.listeners) - element.setProject(context.getProject()) - element.setRealThing(logger) - ((UnknownElement)parent).addChild(element) - RuntimeConfigurable wrapper = new RuntimeConfigurable(element, element.getQName()) - parentWrapper.addChild(wrapper) - return wrapper.getProxy() - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/SlowTestsConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/SlowTestsConfiguration.groovy deleted file mode 100644 index 2705fdeaacb..00000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/SlowTestsConfiguration.groovy +++ /dev/null @@ -1,14 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -class SlowTestsConfiguration { - int heartbeat = 0 - int summarySize = 0 - - void heartbeat(int heartbeat) { - this.heartbeat = heartbeat - } - - void summarySize(int summarySize) { - this.summarySize = summarySize - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/StackTraceFiltersConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/StackTraceFiltersConfiguration.groovy deleted file mode 100644 index 5e5610ab68e..00000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/StackTraceFiltersConfiguration.groovy +++ /dev/null @@ -1,14 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -class StackTraceFiltersConfiguration { - List patterns = new ArrayList<>() - List contains = new ArrayList<>() - - void regex(String pattern) { - patterns.add(pattern) - } - - void contains(String contain) { - contains.add(contain) - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy deleted file mode 100644 index 97251252f54..00000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy +++ /dev/null @@ -1,43 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import org.gradle.api.tasks.Input -import org.gradle.util.ConfigureUtil - -class TestLoggingConfiguration { - /** Display mode for output streams. */ - static enum OutputMode { - /** Always display the output emitted from tests. */ - ALWAYS, - /** - * Display the output only if a test/ suite failed. This requires internal buffering - * so the output will be shown only after a test completes. - */ - ONERROR, - /** Don't display the output, even on test failures. 
*/ - NEVER - } - - OutputMode outputMode = OutputMode.ONERROR - SlowTestsConfiguration slowTests = new SlowTestsConfiguration() - StackTraceFiltersConfiguration stackTraceFilters = new StackTraceFiltersConfiguration() - - /** Summarize the first N failures at the end of the test. */ - @Input - int showNumFailuresAtEnd = 3 // match TextReport default - - void slowTests(Closure closure) { - ConfigureUtil.configure(closure, slowTests) - } - - void stackTraceFilters(Closure closure) { - ConfigureUtil.configure(closure, stackTraceFilters) - } - - void outputMode(String mode) { - outputMode = mode.toUpperCase() as OutputMode - } - - void showNumFailuresAtEnd(int n) { - showNumFailuresAtEnd = n - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy deleted file mode 100644 index 05248fc581e..00000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.JUnit4 -import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe -import com.carrotsearch.ant.tasks.junit4.events.TestStartedEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedQuitEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteStartedEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.ChildBootstrap -import com.carrotsearch.ant.tasks.junit4.events.aggregated.HeartBeatEvent -import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener -import org.gradle.internal.logging.progress.ProgressLogger -import org.gradle.internal.logging.progress.ProgressLoggerFactory -import org.junit.runner.Description - -import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds -import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.ERROR -import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.FAILURE -import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED -import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED_ASSUMPTION -import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.OK - -/** - * Adapts junit4's event listeners into gradle's ProgressLogger. 
Note that - * junit4 guarantees (via guava) that methods on this class won't be called by - * multiple threads simultaneously which is helpful in making it simpler. - * - * Every time a test finishes this class will update the logger. It will log - * the last finished test method on the logger line until the first suite - * finishes. Once the first suite finishes it always logs the last finished - * suite. This means that in test runs with a single suite the logger will be - * updated with the test name the whole time which is useful because these runs - * usually have longer individual tests. For test runs with lots of suites the - * majority of the time is spent showing the last suite that finished which is - * more useful for those test runs because test methods there tend to be very - * quick. - */ -class TestProgressLogger implements AggregatedEventListener { - /** Factory to build a progress logger when testing starts */ - ProgressLoggerFactory factory - ProgressLogger parentProgressLogger - ProgressLogger suiteLogger - ProgressLogger testLogger - ProgressLogger[] slaveLoggers - int totalSuites - int totalSlaves - - // Counters incremented test completion. - volatile int suitesCompleted = 0 - volatile int testsCompleted = 0 - volatile int testsFailed = 0 - volatile int testsIgnored = 0 - - @Subscribe - void onStart(AggregatedStartEvent e) throws IOException { - totalSuites = e.suiteCount - totalSlaves = e.slaveCount - parentProgressLogger = factory.newOperation(TestProgressLogger) - parentProgressLogger.setDescription('Randomized test runner') - parentProgressLogger.started() - - suiteLogger = factory.newOperation(TestProgressLogger, parentProgressLogger) - suiteLogger.setDescription('Suite logger') - suiteLogger.started("Suites: 0/" + totalSuites) - testLogger = factory.newOperation(TestProgressLogger, parentProgressLogger) - testLogger.setDescription('Test logger') - testLogger.started('Tests: completed: 0, failed: 0, ignored: 0') - slaveLoggers = new ProgressLogger[e.slaveCount] - for (int i = 0; i < e.slaveCount; ++i) { - slaveLoggers[i] = factory.newOperation(TestProgressLogger, parentProgressLogger) - slaveLoggers[i].setDescription("J${i} test logger") - slaveLoggers[i].started("J${i}: initializing...") - } - } - - @Subscribe - void onChildBootstrap(ChildBootstrap e) throws IOException { - slaveLoggers[e.getSlave().id].progress("J${e.slave.id}: starting (pid ${e.slave.pidString})") - } - - @Subscribe - void onQuit(AggregatedQuitEvent e) throws IOException { - // if onStart was never called (eg no matching tests), suiteLogger and all the other loggers will be null - if (suiteLogger != null) { - suiteLogger.completed() - testLogger.completed() - for (ProgressLogger slaveLogger : slaveLoggers) { - slaveLogger.completed() - } - parentProgressLogger.completed() - } - } - - @Subscribe - void onSuiteStart(AggregatedSuiteStartedEvent e) throws IOException { - String suiteName = simpleName(e.suiteStartedEvent.description) - slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${suiteName} - initializing") - } - - @Subscribe - void onSuiteResult(AggregatedSuiteResultEvent e) throws IOException { - suitesCompleted++ - suiteLogger.progress("Suites: " + suitesCompleted + "/" + totalSuites) - } - - @Subscribe - void onTestResult(AggregatedTestResultEvent e) throws IOException { - String statusMessage - testsCompleted++ - switch (e.status) { - case ERROR: - case FAILURE: - testsFailed++ - statusMessage = "failed" - break - case IGNORED: - case IGNORED_ASSUMPTION: - testsIgnored++ - 
statusMessage = "ignored" - break - case OK: - String time = formatDurationInSeconds(e.executionTime) - statusMessage = "completed [${time}]" - break - default: - throw new IllegalArgumentException("Unknown test status: [${e.status}]") - } - testLogger.progress("Tests: completed: ${testsCompleted}, failed: ${testsFailed}, ignored: ${testsIgnored}") - String testName = testName(e.description) - slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} ${statusMessage}") - } - - @Subscribe - void onTestStarted(TestStartedEvent e) throws IOException { - String testName = testName(e.description) - slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} ...") - } - - @Subscribe - void onHeartbeat(HeartBeatEvent e) throws IOException { - String testName = testName(e.description) - String time = formatDurationInSeconds(e.getNoEventDuration()) - slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} stalled for ${time}") - } - - /** - * Build the test name in the format of . - */ - private static String testName(Description description) { - String className = simpleName(description) - if (description == null) { - return className + "." + "" - } - return className + "." + description.methodName - } - - /** - * Extract a Class#getSimpleName style name from Class#getName style - * string. We can't just use Class#getSimpleName because junit descriptions - * don't always set the class field but they always set the className - * field. - */ - private static String simpleName(Description description) { - if (description == null) { - return "" - } - return description.className.substring(description.className.lastIndexOf('.') + 1) - } - - @Override - void setOuter(JUnit4 junit) {} -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy deleted file mode 100644 index 6ed6ecf8619..00000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy +++ /dev/null @@ -1,369 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.JUnit4 -import com.carrotsearch.ant.tasks.junit4.Pluralize -import com.carrotsearch.ant.tasks.junit4.TestsSummaryEventListener -import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.base.Strings -import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe -import com.carrotsearch.ant.tasks.junit4.events.EventType -import com.carrotsearch.ant.tasks.junit4.events.IEvent -import com.carrotsearch.ant.tasks.junit4.events.IStreamEvent -import com.carrotsearch.ant.tasks.junit4.events.SuiteStartedEvent -import com.carrotsearch.ant.tasks.junit4.events.TestFinishedEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedQuitEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedResultEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteStartedEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.ChildBootstrap -import com.carrotsearch.ant.tasks.junit4.events.aggregated.HeartBeatEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.PartialOutputEvent -import 
com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus -import com.carrotsearch.ant.tasks.junit4.events.mirrors.FailureMirror -import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener -import com.carrotsearch.ant.tasks.junit4.listeners.StackTraceFilter -import org.apache.tools.ant.filters.TokenFilter -import org.gradle.api.logging.LogLevel -import org.gradle.api.logging.Logger -import org.junit.runner.Description - -import java.util.concurrent.atomic.AtomicInteger - -import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDescription -import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds -import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatTime -import static com.carrotsearch.gradle.junit4.TestLoggingConfiguration.OutputMode - -class TestReportLogger extends TestsSummaryEventListener implements AggregatedEventListener { - - static final String FAILURE_MARKER = " <<< FAILURES!" - - /** Status names column. */ - static EnumMap statusNames; - static { - statusNames = new EnumMap<>(TestStatus.class); - for (TestStatus s : TestStatus.values()) { - statusNames.put(s, - s == TestStatus.IGNORED_ASSUMPTION - ? "IGNOR/A" : s.toString()); - } - } - - JUnit4 owner - - /** Logger to write the report to */ - Logger logger - - TestLoggingConfiguration config - - /** Forked concurrent JVM count. */ - int forkedJvmCount - - /** Format line for JVM ID string. */ - String jvmIdFormat - - /** Output stream that logs messages to the given logger */ - LoggingOutputStream outStream - LoggingOutputStream errStream - - /** A list of failed tests, if to be displayed at the end. */ - List failedTests = new ArrayList<>() - - /** Stack trace filters. */ - StackTraceFilter stackFilter = new StackTraceFilter() - - Map suiteTimes = new HashMap<>() - boolean slowTestsFound = false - - int totalSuites - AtomicInteger suitesCompleted = new AtomicInteger() - - @Subscribe - void onStart(AggregatedStartEvent e) throws IOException { - this.totalSuites = e.getSuiteCount(); - StringBuilder info = new StringBuilder('==> Test Info: ') - info.append('seed=' + owner.getSeed() + '; ') - info.append(Pluralize.pluralize(e.getSlaveCount(), 'jvm') + '=' + e.getSlaveCount() + '; ') - info.append(Pluralize.pluralize(e.getSuiteCount(), 'suite') + '=' + e.getSuiteCount()) - logger.lifecycle(info.toString()) - - forkedJvmCount = e.getSlaveCount(); - jvmIdFormat = " J%-" + (1 + (int) Math.floor(Math.log10(forkedJvmCount))) + "d"; - - outStream = new LoggingOutputStream(logger: logger, level: LogLevel.LIFECYCLE, prefix: " 1> ") - errStream = new LoggingOutputStream(logger: logger, level: LogLevel.ERROR, prefix: " 2> ") - - for (String contains : config.stackTraceFilters.contains) { - TokenFilter.ContainsString containsFilter = new TokenFilter.ContainsString() - containsFilter.setContains(contains) - stackFilter.addContainsString(containsFilter) - } - for (String pattern : config.stackTraceFilters.patterns) { - TokenFilter.ContainsRegex regexFilter = new TokenFilter.ContainsRegex() - regexFilter.setPattern(pattern) - stackFilter.addContainsRegex(regexFilter) - } - } - - @Subscribe - void onChildBootstrap(ChildBootstrap e) throws IOException { - logger.info("Started J" + e.getSlave().id + " PID(" + e.getSlave().getPidString() + ")."); - } - - @Subscribe - void onHeartbeat(HeartBeatEvent e) throws IOException { - logger.warn("HEARTBEAT J" + e.getSlave().id + " PID(" + e.getSlave().getPidString() + "): " + - formatTime(e.getCurrentTime()) + ", stalled for " + 
- formatDurationInSeconds(e.getNoEventDuration()) + " at: " + - (e.getDescription() == null ? "" : formatDescription(e.getDescription()))) - slowTestsFound = true - } - - @Subscribe - void onQuit(AggregatedQuitEvent e) throws IOException { - if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) { - List sublist = this.failedTests - StringBuilder b = new StringBuilder() - b.append('Tests with failures') - if (sublist.size() > config.showNumFailuresAtEnd) { - sublist = sublist.subList(0, config.showNumFailuresAtEnd) - b.append(" (first " + config.showNumFailuresAtEnd + " out of " + failedTests.size() + ")") - } - b.append(':\n') - for (Description description : sublist) { - b.append(" - ").append(formatDescription(description, true)).append('\n') - } - logger.warn(b.toString()) - } - if (config.slowTests.summarySize > 0) { - List> sortedSuiteTimes = new ArrayList<>(suiteTimes.entrySet()) - Collections.sort(sortedSuiteTimes, new Comparator>() { - @Override - int compare(Map.Entry o1, Map.Entry o2) { - return o2.value - o1.value // sort descending - } - }) - LogLevel level = slowTestsFound ? LogLevel.WARN : LogLevel.INFO - int numToLog = Math.min(config.slowTests.summarySize, sortedSuiteTimes.size()) - logger.log(level, 'Slow Tests Summary:') - for (int i = 0; i < numToLog; ++i) { - logger.log(level, String.format(Locale.ENGLISH, '%6.2fs | %s', - sortedSuiteTimes.get(i).value / 1000.0, - sortedSuiteTimes.get(i).key)); - } - logger.log(level, '') // extra vertical separation - } - if (failedTests.isEmpty()) { - // summary is already printed for failures - logger.lifecycle('==> Test Summary: ' + getResult().toString()) - } - } - - @Subscribe - void onSuiteStart(AggregatedSuiteStartedEvent e) throws IOException { - if (isPassthrough()) { - SuiteStartedEvent evt = e.getSuiteStartedEvent(); - emitSuiteStart(LogLevel.LIFECYCLE, evt.getDescription()); - } - } - - @Subscribe - void onOutput(PartialOutputEvent e) throws IOException { - if (isPassthrough()) { - // We only allow passthrough output if there is one JVM. - switch (e.getEvent().getType()) { - case EventType.APPEND_STDERR: - ((IStreamEvent) e.getEvent()).copyTo(errStream); - break; - case EventType.APPEND_STDOUT: - ((IStreamEvent) e.getEvent()).copyTo(outStream); - break; - default: - break; - } - } - } - - @Subscribe - void onTestResult(AggregatedTestResultEvent e) throws IOException { - if (isPassthrough() && e.getStatus() != TestStatus.OK) { - flushOutput(); - emitStatusLine(LogLevel.ERROR, e, e.getStatus(), e.getExecutionTime()); - } - - if (!e.isSuccessful()) { - failedTests.add(e.getDescription()); - } - } - - @Subscribe - void onSuiteResult(AggregatedSuiteResultEvent e) throws IOException { - final int completed = suitesCompleted.incrementAndGet(); - - if (e.isSuccessful() && e.getTests().isEmpty()) { - return; - } - if (config.slowTests.summarySize > 0) { - suiteTimes.put(e.getDescription().getDisplayName(), e.getExecutionTime()) - } - - LogLevel level = e.isSuccessful() && config.outputMode != OutputMode.ALWAYS ? LogLevel.INFO : LogLevel.LIFECYCLE - - // We must emit buffered test and stream events (in case of failures). - if (!isPassthrough()) { - emitSuiteStart(level, e.getDescription()) - emitBufferedEvents(level, e) - } - - // Emit a synthetic failure for suite-level errors, if any. - if (!e.getFailures().isEmpty()) { - emitStatusLine(level, e, TestStatus.ERROR, 0) - } - - if (!e.getFailures().isEmpty()) { - failedTests.add(e.getDescription()) - } - - emitSuiteEnd(level, e, completed) - } - - /** Suite prologue. 
*/ - void emitSuiteStart(LogLevel level, Description description) throws IOException { - logger.log(level, 'Suite: ' + description.getDisplayName()); - } - - void emitBufferedEvents(LogLevel level, AggregatedSuiteResultEvent e) throws IOException { - if (config.outputMode == OutputMode.NEVER) { - return - } - - final IdentityHashMap eventMap = new IdentityHashMap<>(); - for (AggregatedTestResultEvent tre : e.getTests()) { - eventMap.put(tre.getTestFinishedEvent(), tre) - } - - final boolean emitOutput = config.outputMode == OutputMode.ALWAYS && isPassthrough() == false || - config.outputMode == OutputMode.ONERROR && e.isSuccessful() == false - - for (IEvent event : e.getEventStream()) { - switch (event.getType()) { - case EventType.APPEND_STDOUT: - if (emitOutput) ((IStreamEvent) event).copyTo(outStream); - break; - - case EventType.APPEND_STDERR: - if (emitOutput) ((IStreamEvent) event).copyTo(errStream); - break; - - case EventType.TEST_FINISHED: - assert eventMap.containsKey(event) - final AggregatedTestResultEvent aggregated = eventMap.get(event); - if (aggregated.getStatus() != TestStatus.OK) { - flushOutput(); - emitStatusLine(level, aggregated, aggregated.getStatus(), aggregated.getExecutionTime()); - } - - default: - break; - } - } - - if (emitOutput) { - flushOutput() - } - } - - void emitSuiteEnd(LogLevel level, AggregatedSuiteResultEvent e, int suitesCompleted) throws IOException { - - final StringBuilder b = new StringBuilder(); - b.append(String.format(Locale.ENGLISH, 'Completed [%d/%d]%s in %.2fs, ', - suitesCompleted, - totalSuites, - e.getSlave().slaves > 1 ? ' on J' + e.getSlave().id : '', - e.getExecutionTime() / 1000.0d)); - b.append(e.getTests().size()).append(Pluralize.pluralize(e.getTests().size(), ' test')); - - int failures = e.getFailureCount(); - if (failures > 0) { - b.append(', ').append(failures).append(Pluralize.pluralize(failures, ' failure')); - } - - int errors = e.getErrorCount(); - if (errors > 0) { - b.append(', ').append(errors).append(Pluralize.pluralize(errors, ' error')); - } - - int ignored = e.getIgnoredCount(); - if (ignored > 0) { - b.append(', ').append(ignored).append(' skipped'); - } - - if (!e.isSuccessful()) { - b.append(' <<< FAILURES!'); - } - - b.append('\n') - logger.log(level, b.toString()); - } - - /** Emit status line for an aggregated event. */ - void emitStatusLine(LogLevel level, AggregatedResultEvent result, TestStatus status, long timeMillis) throws IOException { - final StringBuilder line = new StringBuilder(); - - line.append(Strings.padEnd(statusNames.get(status), 8, ' ' as char)) - line.append(formatDurationInSeconds(timeMillis)) - if (forkedJvmCount > 1) { - line.append(String.format(Locale.ENGLISH, jvmIdFormat, result.getSlave().id)) - } - line.append(' | ') - - line.append(formatDescription(result.getDescription())) - if (!result.isSuccessful()) { - line.append(FAILURE_MARKER) - } - logger.log(level, line.toString()) - - PrintWriter writer = new PrintWriter(new LoggingOutputStream(logger: logger, level: level, prefix: ' > ')) - - if (status == TestStatus.IGNORED && result instanceof AggregatedTestResultEvent) { - writer.write('Cause: ') - writer.write(((AggregatedTestResultEvent) result).getCauseForIgnored()) - writer.flush() - } - - final List failures = result.getFailures(); - if (!failures.isEmpty()) { - int count = 0; - for (FailureMirror fm : failures) { - count++; - if (fm.isAssumptionViolation()) { - writer.write(String.format(Locale.ENGLISH, - 'Assumption #%d: %s', - count, fm.getMessage() == null ? 
'(no message)' : fm.getMessage())); - } else { - writer.write(String.format(Locale.ENGLISH, - 'Throwable #%d: %s', - count, - stackFilter.apply(fm.getTrace()))); - } - } - writer.flush() - } - } - - void flushOutput() throws IOException { - outStream.flush() - errStream.flush() - } - - /** Returns true if output should be logged immediately. */ - boolean isPassthrough() { - return forkedJvmCount == 1 && config.outputMode == OutputMode.ALWAYS - } - - @Override - void setOuter(JUnit4 task) { - owner = task - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 4bd740dde51..b585180bc29 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -18,13 +18,13 @@ */ package org.elasticsearch.gradle -import com.carrotsearch.gradle.junit4.RandomizedTestingTask import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.apache.commons.io.IOUtils import org.apache.tools.ant.taskdefs.condition.Os import org.eclipse.jgit.lib.Constants import org.eclipse.jgit.lib.RepositoryBuilder import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.elasticsearch.gradle.test.ErrorReportingTestListener import org.gradle.api.GradleException import org.gradle.api.InvalidUserDataException import org.gradle.api.JavaVersion @@ -40,8 +40,8 @@ import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.credentials.HttpHeaderCredentials +import org.gradle.api.execution.TaskActionListener import org.gradle.api.execution.TaskExecutionGraph -import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.JavaPlugin import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin @@ -51,6 +51,7 @@ import org.gradle.api.tasks.bundling.Jar import org.gradle.api.tasks.compile.GroovyCompile import org.gradle.api.tasks.compile.JavaCompile import org.gradle.api.tasks.javadoc.Javadoc +import org.gradle.api.tasks.testing.Test import org.gradle.authentication.http.HttpHeaderAuthentication import org.gradle.internal.jvm.Jvm import org.gradle.process.ExecResult @@ -83,7 +84,6 @@ class BuildPlugin implements Plugin { ) } project.pluginManager.apply('java') - project.pluginManager.apply('carrotsearch.randomized-testing') configureConfigurations(project) configureJars(project) // jar config must be added before info broker // these plugins add lots of info to our jars @@ -93,8 +93,12 @@ class BuildPlugin implements Plugin { project.pluginManager.apply('nebula.info-scm') project.pluginManager.apply('nebula.info-jar') + // apply global test task failure listener + project.rootProject.pluginManager.apply(TestFailureReportingPlugin) + project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask) + setupSeed(project) globalBuildInfo(project) configureRepositories(project) project.ext.versions = VersionProperties.versions @@ -103,9 +107,7 @@ class BuildPlugin implements Plugin { configureJavadoc(project) configureSourcesJar(project) configurePomGeneration(project) - - applyCommonTestConfig(project) - configureTest(project) + configureTestTasks(project) configurePrecommit(project) configureDependenciesInfo(project) } @@ -904,128 +906,107 @@ class BuildPlugin implements Plugin { } } - static void 
applyCommonTestConfig(Project project) { - project.tasks.withType(RandomizedTestingTask) {task -> - jvm "${project.runtimeJavaHome}/bin/java" - parallelism System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel) - ifNoTests 'fail' - onNonEmptyWorkDirectory 'wipe' - leaveTemporary true - project.sourceSets.matching { it.name == "test" }.all { test -> - task.testClassesDirs = test.output.classesDirs - task.classpath = test.runtimeClasspath - } - group = JavaBasePlugin.VERIFICATION_GROUP - dependsOn 'testClasses' + static void configureTestTasks(Project project) { + // Default test task should run only unit tests + project.tasks.withType(Test).matching { it.name == 'test' }.all { + include '**/*Tests.class' + } - // Make sure all test tasks are configured properly - if (name != "test") { - project.tasks.matching { it.name == "test"}.all { testTask -> - task.shouldRunAfter testTask - } - } - if (name == "unitTest") { - include("**/*Tests.class") - } - - // TODO: why are we not passing maxmemory to junit4? - jvmArg '-Xmx' + System.getProperty('tests.heap.size', '512m') - jvmArg '-Xms' + System.getProperty('tests.heap.size', '512m') - jvmArg '-XX:+HeapDumpOnOutOfMemoryError' + // none of this stuff is applicable to the `:buildSrc` project tests + if (project.path != ':build-tools') { File heapdumpDir = new File(project.buildDir, 'heapdump') - heapdumpDir.mkdirs() - jvmArg '-XX:HeapDumpPath=' + heapdumpDir - if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9) { - jvmArg '--illegal-access=warn' - } - argLine System.getProperty('tests.jvm.argline') - // we use './temp' since this is per JVM and tests are forbidden from writing to CWD - systemProperty 'java.io.tmpdir', './temp' - systemProperty 'java.awt.headless', 'true' - systemProperty 'tests.gradle', 'true' - systemProperty 'tests.artifact', project.name - systemProperty 'tests.task', path - systemProperty 'tests.security.manager', 'true' - systemProperty 'jna.nosys', 'true' - systemProperty 'compiler.java', project.ext.compilerJavaVersion.getMajorVersion() - if (project.ext.inFipsJvm) { - systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS" - } else { - systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() - } - // TODO: remove setting logging level via system property - systemProperty 'tests.logger.level', 'WARN' - for (Map.Entry property : System.properties.entrySet()) { - if (property.getKey().startsWith('tests.') || - property.getKey().startsWith('es.')) { - if (property.getKey().equals('tests.seed')) { - /* The seed is already set on the project so we - * shouldn't attempt to override it. 
*/ - continue; - } - systemProperty property.getKey(), property.getValue() + project.tasks.withType(Test) { Test test -> + File testOutputDir = new File(test.reports.junitXml.getDestination(), "output") + + doFirst { + project.mkdir(testOutputDir) + project.mkdir(heapdumpDir) + project.mkdir(test.workingDir) } - } - // TODO: remove this once ctx isn't added to update script params in 7.0 - systemProperty 'es.scripting.update.ctx_in_params', 'false' + def listener = new ErrorReportingTestListener(test.testLogging, testOutputDir) + test.extensions.add(ErrorReportingTestListener, 'errorReportingTestListener', listener) + addTestOutputListener(listener) + addTestListener(listener) - // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM - if (project.inFipsJvm) { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } + executable = "${project.runtimeJavaHome}/bin/java" + workingDir = project.file("${project.buildDir}/testrun/${test.name}") + maxParallelForks = project.rootProject.ext.defaultParallel - boolean assertionsEnabled = Boolean.parseBoolean(System.getProperty('tests.asserts', 'true')) - enableSystemAssertions assertionsEnabled - enableAssertions assertionsEnabled + exclude '**/*$*.class' - testLogging { - showNumFailuresAtEnd 25 - slowTests { - heartbeat 10 - summarySize 5 + jvmArgs "-Xmx${System.getProperty('tests.heap.size', '512m')}", + "-Xms${System.getProperty('tests.heap.size', '512m')}", + '-XX:+HeapDumpOnOutOfMemoryError', + "-XX:HeapDumpPath=$heapdumpDir" + + if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9) { + jvmArgs '--illegal-access=warn' } - stackTraceFilters { - // custom filters: we carefully only omit test infra noise here - contains '.SlaveMain.' - regex(/^(\s+at )(org\.junit\.)/) - // also includes anonymous classes inside these two: - regex(/^(\s+at )(com\.carrotsearch\.randomizedtesting\.RandomizedRunner)/) - regex(/^(\s+at )(com\.carrotsearch\.randomizedtesting\.ThreadLeakControl)/) - regex(/^(\s+at )(com\.carrotsearch\.randomizedtesting\.rules\.)/) - regex(/^(\s+at )(org\.apache\.lucene\.util\.TestRule)/) - regex(/^(\s+at )(org\.apache\.lucene\.util\.AbstractBeforeAfterRule)/) + + if (System.getProperty('tests.jvm.argline')) { + jvmArgs System.getProperty('tests.jvm.argline').split(" ") } - if (System.getProperty('tests.class') != null && System.getProperty('tests.output') == null) { - // if you are debugging, you want to see the output! 
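[Editor's note: `always` streams test output immediately, while the default `onerror` (see the OutputMode enum in the deleted TestLoggingConfiguration above) buffers output and replays it only for failed suites. After this patch, the buffered-replay behaviour is provided by the new ErrorReportingTestListener, and immediate streaming corresponds roughly to vanilla Gradle test logging — a sketch under that assumption, not part of the patch:

    test {
        testLogging {
            // stream everything, like the old outputMode 'always'
            showStandardStreams = true
            exceptionFormat = 'full'
        }
    }
]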
- outputMode 'always' + + if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) { + jvmArgs '-ea', '-esa' + } + + // we use './temp' since this is per JVM and tests are forbidden from writing to CWD + systemProperties 'gradle.dist.lib': new File(project.class.location.toURI()).parent, + 'gradle.worker.jar': "${project.gradle.getGradleUserHomeDir()}/caches/${project.gradle.gradleVersion}/workerMain/gradle-worker.jar", + 'gradle.user.home': project.gradle.getGradleUserHomeDir(), + 'java.io.tmpdir': './temp', + 'java.awt.headless': 'true', + 'tests.gradle': 'true', + 'tests.artifact': project.name, + 'tests.task': path, + 'tests.security.manager': 'true', + 'tests.seed': project.testSeed, + 'jna.nosys': 'true', + 'compiler.java': project.ext.compilerJavaVersion.getMajorVersion() + + if (project.ext.inFipsJvm) { + systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS" } else { - outputMode System.getProperty('tests.output', 'onerror') + systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + } + // TODO: remove setting logging level via system property + systemProperty 'tests.logger.level', 'WARN' + System.getProperties().each { key, value -> + if ((key.startsWith('tests.') || key.startsWith('es.'))) { + systemProperty key, value + } } - } - balancers { - executionTime cacheFilename: ".local-${project.version}-${name}-execution-times.log" - } + // TODO: remove this once ctx isn't added to update script params in 7.0 + systemProperty 'es.scripting.update.ctx_in_params', 'false' - listeners { - junitReport() - } + // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM + if (project.inFipsJvm) { + systemProperty 'javax.net.ssl.trustStorePassword', 'password' + systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } - exclude '**/*$*.class' + testLogging { + showExceptions = true + showCauses = true + exceptionFormat = 'full' + } - project.plugins.withType(ShadowPlugin).whenPluginAdded { - // Test against a shadow jar if we made one - classpath -= project.tasks.compileJava.outputs.files - classpath += project.tasks.shadowJar.outputs.files - dependsOn project.tasks.shadowJar + project.plugins.withType(ShadowPlugin).whenPluginAdded { + // Test against a shadow jar if we made one + classpath -= project.tasks.compileJava.outputs.files + classpath += project.tasks.shadowJar.outputs.files + + dependsOn project.tasks.shadowJar + } } } } - private static String findDefaultParallel(Project project) { + private static int findDefaultParallel(Project project) { if (project.file("/proc/cpuinfo").exists()) { // Count physical cores on any Linux distro ( don't count hyper-threading ) Map socketToCore = [:] @@ -1046,7 +1027,7 @@ class BuildPlugin implements Plugin { } } }) - return socketToCore.values().sum().toString(); + return socketToCore.values().sum() } else if ('Mac OS X'.equals(System.getProperty('os.name'))) { // Ask macOS to count physical CPUs for us ByteArrayOutputStream stdout = new ByteArrayOutputStream() @@ -1055,16 +1036,9 @@ class BuildPlugin implements Plugin { args '-n', 'hw.physicalcpu' standardOutput = stdout } - return stdout.toString('UTF-8').trim(); - } - return 'auto'; - } - - /** Configures the test task */ - static Task configureTest(Project project) { - project.tasks.getByName('test') { - include '**/*Tests.class' + return Integer.parseInt(stdout.toString('UTF-8').trim()) } + return Runtime.getRuntime().availableProcessors() / 2 } private static 
configurePrecommit(Project project) { @@ -1094,4 +1068,58 @@ class BuildPlugin implements Plugin { deps.mappings = project.dependencyLicenses.mappings } } + + /** + * Pins the test seed at configuration time so it isn't different on every + * {@link Test} execution. This is useful if random + * decisions in one run of {@linkplain Test} influence the + * outcome of subsequent runs. Pinning the seed up front like this makes + * the reproduction line from one run be useful on another run. + */ + static String setupSeed(Project project) { + if (project.rootProject.ext.has('testSeed')) { + /* Skip this if we've already pinned the testSeed. It is important + * that this checks the rootProject so that we know we've only ever + * initialized one time. */ + return project.rootProject.ext.testSeed + } + + String testSeed = System.getProperty('tests.seed') + if (testSeed == null) { + long seed = new Random(System.currentTimeMillis()).nextLong() + testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT) + } + + project.rootProject.ext.testSeed = testSeed + return testSeed + } + + private static class TestFailureReportingPlugin implements Plugin { + @Override + void apply(Project project) { + if (project != project.rootProject) { + throw new IllegalStateException("${this.class.getName()} can only be applied to the root project.") + } + + project.gradle.addListener(new TaskActionListener() { + @Override + void beforeActions(Task task) { + + } + + @Override + void afterActions(Task task) { + if (task instanceof Test) { + ErrorReportingTestListener listener = task.extensions.findByType(ErrorReportingTestListener) + if (listener != null && listener.getFailedTests().size() > 0) { + task.logger.lifecycle("\nTests with failures:") + listener.getFailedTests().each { + task.logger.lifecycle(" - ${it.getFullName()}") + } + } + } + } + }) + } + } } diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/LoggingOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggingOutputStream.groovy similarity index 97% rename from buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/LoggingOutputStream.groovy rename to buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggingOutputStream.groovy index ce0995a5a8c..e2e2b7c9544 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/LoggingOutputStream.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggingOutputStream.groovy @@ -1,4 +1,4 @@ -package com.carrotsearch.gradle.junit4 +package org.elasticsearch.gradle import org.gradle.api.logging.LogLevel import org.gradle.api.logging.Logger diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 4d9cc38323b..df26a5a07cb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -18,30 +18,36 @@ */ package org.elasticsearch.gradle.test -import com.carrotsearch.gradle.junit4.RandomizedTestingTask import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.testclusters.ElasticsearchCluster import org.elasticsearch.gradle.testclusters.TestClustersPlugin import org.gradle.api.DefaultTask import org.gradle.api.Task import org.gradle.api.execution.TaskExecutionAdapter +import org.gradle.api.logging.Logger +import org.gradle.api.logging.Logging import org.gradle.api.tasks.Copy import 
org.gradle.api.tasks.Input import org.gradle.api.tasks.TaskState import org.gradle.api.tasks.options.Option +import org.gradle.api.tasks.testing.Test import org.gradle.plugins.ide.idea.IdeaPlugin +import org.gradle.process.CommandLineArgumentProvider import java.nio.charset.StandardCharsets import java.nio.file.Files import java.util.stream.Stream + /** * A wrapper task around setting up a cluster and running rest tests. */ -public class RestIntegTestTask extends DefaultTask { +class RestIntegTestTask extends DefaultTask { + + private static final Logger LOGGER = Logging.getLogger(RestIntegTestTask) protected ClusterConfiguration clusterConfig - protected RandomizedTestingTask runner + protected Test runner protected Task clusterInit @@ -52,8 +58,8 @@ public class RestIntegTestTask extends DefaultTask { @Input Boolean includePackaged = false - public RestIntegTestTask() { - runner = project.tasks.create("${name}Runner", RandomizedTestingTask.class) + RestIntegTestTask() { + runner = project.tasks.create("${name}Runner", Test.class) super.dependsOn(runner) clusterInit = project.tasks.create(name: "${name}Cluster#init", dependsOn: project.testClasses) runner.dependsOn(clusterInit) @@ -71,35 +77,66 @@ public class RestIntegTestTask extends DefaultTask { runner.useCluster project.testClusters."$name" } + // disable the build cache for rest test tasks + // there are a number of inputs we aren't properly tracking here so we'll just not cache these for now + runner.outputs.doNotCacheIf('Caching is disabled for REST integration tests') { true } + // override/add more for rest tests - runner.parallelism = '1' + runner.maxParallelForks = 1 runner.include('**/*IT.class') runner.systemProperty('tests.rest.load_packaged', 'false') + /* + * We use lazy-evaluated strings in order to configure system properties whose value will not be known until + * execution time (e.g. cluster port numbers). Adding these via the normal DSL doesn't work as these get treated + * as task inputs and therefore Gradle attempts to snapshot them before/after task execution. This fails due + * to the GStrings containing references to non-serializable objects. + * + * We bypass this by instead passing these system properties via a CommandLineArgumentProvider. This has the added + * side-effect that these properties are NOT treated as inputs, therefore they don't influence things like the + * build cache key or up to date checking.
+ */ + def nonInputProperties = new CommandLineArgumentProvider() { + private final Map systemProperties = [:] + + void systemProperty(String key, Object value) { + systemProperties.put(key, value) + } + + @Override + Iterable asArguments() { + return systemProperties.collect { key, value -> + "-D${key}=${value.toString()}".toString() + } + } + } + runner.jvmArgumentProviders.add(nonInputProperties) + runner.ext.nonInputProperties = nonInputProperties + if (System.getProperty("tests.rest.cluster") == null) { if (System.getProperty("tests.cluster") != null) { throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null") } if (usesTestclusters == true) { ElasticsearchCluster cluster = project.testClusters."${name}" - runner.systemProperty('tests.rest.cluster', {cluster.allHttpSocketURI.join(",") }) - runner.systemProperty('tests.config.dir', {cluster.singleNode().getConfigDir()}) - runner.systemProperty('tests.cluster', {cluster.transportPortURI}) + nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",") }") + nonInputProperties.systemProperty('tests.config.dir', "${-> cluster.singleNode().getConfigDir() }") + nonInputProperties.systemProperty('tests.cluster', "${-> cluster.transportPortURI }") } else { // we pass all nodes to the rest cluster to allow the clients to round-robin between them // this is more realistic than just talking to a single node - runner.systemProperty('tests.rest.cluster', "${-> nodes.collect { it.httpUri() }.join(",")}") - runner.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}") + nonInputProperties.systemProperty('tests.rest.cluster', "${-> nodes.collect { it.httpUri() }.join(",")}") + nonInputProperties.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}") // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass // both as separate sysprops - runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") + nonInputProperties.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") // dump errors and warnings from cluster log on failure TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { @Override void afterExecute(Task task, TaskState state) { - if (state.failure != null) { + if (task == runner && state.failure != null) { for (NodeInfo nodeInfo : nodes) { printLogExcerpt(nodeInfo) } @@ -194,9 +231,9 @@ public class RestIntegTestTask extends DefaultTask { /** Print out an excerpt of the log from the given node. */ protected static void printLogExcerpt(NodeInfo nodeInfo) { File logFile = new File(nodeInfo.homeDir, "logs/${nodeInfo.clusterName}.log") - println("\nCluster ${nodeInfo.clusterName} - node ${nodeInfo.nodeNum} log excerpt:") - println("(full log at ${logFile})") - println('-----------------------------------------') + LOGGER.lifecycle("\nCluster ${nodeInfo.clusterName} - node ${nodeInfo.nodeNum} log excerpt:") + LOGGER.lifecycle("(full log at ${logFile})") + LOGGER.lifecycle('-----------------------------------------') Stream stream = Files.lines(logFile.toPath(), StandardCharsets.UTF_8) try { boolean inStartup = true @@ -211,9 +248,9 @@ public class RestIntegTestTask extends DefaultTask { } if (inStartup || inExcerpt) { if (linesSkipped != 0) { - println("... SKIPPED ${linesSkipped} LINES ...") + LOGGER.lifecycle("... 
SKIPPED ${linesSkipped} LINES ...") } - println(line) + LOGGER.lifecycle(line) linesSkipped = 0 } else { ++linesSkipped @@ -225,7 +262,7 @@ public class RestIntegTestTask extends DefaultTask { } finally { stream.close() } - println('=========================================') + LOGGER.lifecycle('=========================================') } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index 9e41466ebdd..2a858206ebd 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -20,7 +20,8 @@ package org.elasticsearch.gradle.test -import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin + +import groovy.transform.CompileStatic import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask import org.elasticsearch.gradle.VersionProperties @@ -28,48 +29,66 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.InvalidUserDataException import org.gradle.api.Plugin import org.gradle.api.Project +import org.gradle.api.artifacts.Configuration import org.gradle.api.plugins.JavaBasePlugin +import org.gradle.api.plugins.JavaPlugin +import org.gradle.api.tasks.SourceSet +import org.gradle.api.tasks.SourceSetContainer import org.gradle.api.tasks.compile.JavaCompile +import org.gradle.api.tasks.testing.Test +import org.gradle.plugins.ide.eclipse.model.EclipseModel +import org.gradle.plugins.ide.idea.model.IdeaModel + /** * Configures the build to compile tests against Elasticsearch's test framework * and run REST tests. Use BuildPlugin if you want to build main code as well * as tests. 
 */
-public class StandaloneRestTestPlugin implements Plugin<Project> {
+@CompileStatic
+class StandaloneRestTestPlugin implements Plugin<Project> {
 
     @Override
-    public void apply(Project project) {
+    void apply(Project project) {
         if (project.pluginManager.hasPlugin('elasticsearch.build')) {
             throw new InvalidUserDataException('elasticsearch.standalone-test '
                 + 'elasticsearch.standalone-rest-test, and elasticsearch.build '
                 + 'are mutually exclusive')
         }
         project.pluginManager.apply(JavaBasePlugin)
-        project.pluginManager.apply(RandomizedTestingPlugin)
         project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask)
         BuildPlugin.globalBuildInfo(project)
         BuildPlugin.configureRepositories(project)
-        BuildPlugin.applyCommonTestConfig(project)
+        BuildPlugin.configureTestTasks(project)
 
         // only setup tests to build
-        project.sourceSets.create('test')
+        SourceSetContainer sourceSets = project.extensions.getByType(SourceSetContainer)
+        SourceSet testSourceSet = sourceSets.create('test')
+
+        project.tasks.withType(Test) { Test test ->
+            test.testClassesDirs = testSourceSet.output.classesDirs
+            test.classpath = testSourceSet.runtimeClasspath
+        }
+
         // create a compileOnly configuration as others might expect it
         project.configurations.create("compileOnly")
         project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}")
 
-        project.eclipse.classpath.sourceSets = [project.sourceSets.test]
-        project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]
-        project.idea.module.testSourceDirs += project.sourceSets.test.java.srcDirs
-        project.idea.module.scopes['TEST'] = [plus: [project.configurations.testRuntime]]
+        EclipseModel eclipse = project.extensions.getByType(EclipseModel)
+        eclipse.classpath.sourceSets = [testSourceSet]
+        eclipse.classpath.plusConfigurations = [project.configurations.getByName(JavaPlugin.TEST_RUNTIME_CLASSPATH_CONFIGURATION_NAME)]
+
+        IdeaModel idea = project.extensions.getByType(IdeaModel)
+        idea.module.testSourceDirs += testSourceSet.java.srcDirs
+        idea.module.scopes.put('TEST', [plus: [project.configurations.getByName(JavaPlugin.TEST_RUNTIME_CLASSPATH_CONFIGURATION_NAME)]] as Map<String, Collection<Configuration>>)
 
         PrecommitTasks.create(project, false)
-        project.check.dependsOn(project.precommit)
+        project.tasks.getByName('check').dependsOn(project.tasks.getByName('precommit'))
 
-        project.tasks.withType(JavaCompile) {
+        project.tasks.withType(JavaCompile) { JavaCompile task ->
             // This will be the default in Gradle 5.0
-            if (options.compilerArgs.contains("-processor") == false) {
-                options.compilerArgs << '-proc:none'
+            if (task.options.compilerArgs.contains("-processor") == false) {
+                task.options.compilerArgs << '-proc:none'
             }
         }
     }
 }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy
index 95818240cda..ccdffd6458a 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy
@@ -19,34 +19,30 @@
 
 package org.elasticsearch.gradle.test
 
-import com.carrotsearch.gradle.junit4.RandomizedTestingTask
+import groovy.transform.CompileStatic
 import org.elasticsearch.gradle.BuildPlugin
 import org.gradle.api.Plugin
 import org.gradle.api.Project
 import org.gradle.api.plugins.JavaBasePlugin
+import org.gradle.api.tasks.testing.Test
 
 /**
  * Configures the build to compile against Elasticsearch's test framework and
  * run integration and unit tests. Use BuildPlugin if you want to build main
  * code as well as tests.
  */
-public class StandaloneTestPlugin implements Plugin<Project> {
+@CompileStatic
+class StandaloneTestPlugin implements Plugin<Project> {
 
     @Override
-    public void apply(Project project) {
+    void apply(Project project) {
         project.pluginManager.apply(StandaloneRestTestPlugin)
 
-        Map testOptions = [
-            name: 'test',
-            type: RandomizedTestingTask,
-            dependsOn: 'testClasses',
-            group: JavaBasePlugin.VERIFICATION_GROUP,
-            description: 'Runs unit tests that are separate'
-        ]
-        RandomizedTestingTask test = project.tasks.create(testOptions)
+        Test test = project.tasks.create('test', Test)
+        test.group = JavaBasePlugin.VERIFICATION_GROUP
+        test.description = 'Runs unit tests that are separate'
+
         BuildPlugin.configureCompile(project)
 
-        test.classpath = project.sourceSets.test.runtimeClasspath
-        test.testClassesDirs = project.sourceSets.test.output.classesDirs
-        test.mustRunAfter(project.precommit)
-        project.check.dependsOn(test)
+        test.mustRunAfter(project.tasks.getByName('precommit'))
+        project.tasks.getByName('check').dependsOn(test)
    }
 }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy
index e15759a1fe5..0be294fb005 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy
@@ -18,7 +18,7 @@
  */
 package org.elasticsearch.gradle.vagrant
 
-import com.carrotsearch.gradle.junit4.LoggingOutputStream
+import org.elasticsearch.gradle.LoggingOutputStream
 import org.gradle.api.GradleScriptException
 import org.gradle.api.logging.Logger
 import org.gradle.internal.logging.progress.ProgressLogger
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy
index e899c017129..f3031f73c23 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy
@@ -18,7 +18,7 @@
  */
 package org.elasticsearch.gradle.vagrant
 
-import com.carrotsearch.gradle.junit4.LoggingOutputStream
+import org.elasticsearch.gradle.LoggingOutputStream
 import org.gradle.internal.logging.progress.ProgressLogger
 
 /**
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java
index 04e1343f4ac..b2228f5c1b1 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java
@@ -31,12 +31,10 @@ import org.gradle.api.tasks.SourceSet;
 import org.gradle.api.tasks.SourceSetContainer;
 import org.gradle.api.tasks.TaskAction;
 import org.gradle.api.tasks.testing.Test;
-import org.gradle.api.tasks.util.PatternFilterable;
 
 import java.io.File;
 import java.io.IOException;
 import java.lang.annotation.Annotation;
-import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
 import java.net.MalformedURLException;
@@ -75,17 +73,6 @@ public class TestingConventionsTasks extends DefaultTask {
     public Map<String, Set<File>> classFilesPerEnabledTask(FileTree testClassFiles) {
         Map<String, Set<File>> collector = new HashMap<>();
-        // RandomizedTestingTask
-        collector.putAll(
-            getProject().getTasks().withType(getRandomizedTestingTask()).stream()
-                .filter(Task::getEnabled)
-                .collect(Collectors.toMap(
-                    Task::getPath,
-                    task -> testClassFiles.matching(getRandomizedTestingPatternSet(task)).getFiles()
-                    )
-                )
-        );
-
         // Gradle Test
         collector.putAll(
             getProject().getTasks().withType(Test.class).stream()
@@ -279,32 +266,6 @@ public class TestingConventionsTasks extends DefaultTask {
             .collect(Collectors.joining("\n"));
     }
 
-    @SuppressWarnings("unchecked")
-    private PatternFilterable getRandomizedTestingPatternSet(Task task) {
-        try {
-            if (
-                getRandomizedTestingTask().isAssignableFrom(task.getClass()) == false
-            ) {
-                throw new IllegalStateException("Expected " + task + " to be RandomizedTestingTask or Test but it was " + task.getClass());
-            }
-            Method getPatternSet = task.getClass().getMethod("getPatternSet");
-            return (PatternFilterable) getPatternSet.invoke(task);
-        } catch (NoSuchMethodException e) {
-            throw new IllegalStateException("Expecte task to have a `patternSet` " + task, e);
-        } catch (IllegalAccessException | InvocationTargetException e) {
-            throw new IllegalStateException("Failed to get pattern set from task" + task, e);
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    private Class<? extends Task> getRandomizedTestingTask() {
-        try {
-            return (Class<? extends Task>) Class.forName("com.carrotsearch.gradle.junit4.RandomizedTestingTask");
-        } catch (ClassNotFoundException | ClassCastException e) {
-            throw new IllegalStateException("Failed to load randomized testing class", e);
-        }
-    }
-
     private String checkNoneExists(String message, Stream<? extends Class<?>> stream) {
         String problem = stream
             .map(each -> " * " + each.getName())
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java b/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java
new file mode 100644
index 00000000000..ce806b48e56
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java
@@ -0,0 +1,266 @@
+package org.elasticsearch.gradle.test;
+
+import org.gradle.api.internal.tasks.testing.logging.FullExceptionFormatter;
+import org.gradle.api.internal.tasks.testing.logging.TestExceptionFormatter;
+import org.gradle.api.logging.Logger;
+import org.gradle.api.logging.Logging;
+import org.gradle.api.tasks.testing.TestDescriptor;
+import org.gradle.api.tasks.testing.TestListener;
+import org.gradle.api.tasks.testing.TestOutputEvent;
+import org.gradle.api.tasks.testing.TestOutputListener;
+import org.gradle.api.tasks.testing.TestResult;
+import org.gradle.api.tasks.testing.logging.TestLogging;
+
+import java.io.BufferedOutputStream;
+import java.io.BufferedReader;
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.io.UncheckedIOException;
+import java.io.Writer;
+import java.util.Deque;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class ErrorReportingTestListener implements TestOutputListener, TestListener {
+    private static final Logger LOGGER = Logging.getLogger(ErrorReportingTestListener.class);
+    private static final String REPRODUCE_WITH_PREFIX = "REPRODUCE WITH";
+
+    private final TestExceptionFormatter formatter;
+    private final File outputDirectory;
+    private Map<Descriptor, EventWriter> eventWriters = new ConcurrentHashMap<>();
+    private Map<Descriptor, Deque<String>> reproductionLines = new ConcurrentHashMap<>();
+    private Set<Descriptor> failedTests = new LinkedHashSet<>();
+
+    public ErrorReportingTestListener(TestLogging testLogging, File outputDirectory) {
+        this.formatter = new FullExceptionFormatter(testLogging);
+        this.outputDirectory = outputDirectory;
+    }
+
+    @Override
+    public void onOutput(TestDescriptor testDescriptor, TestOutputEvent outputEvent) {
+        TestDescriptor suite = testDescriptor.getParent();
+
+        // Check if this is output from the test suite itself (e.g. afterTest or beforeTest)
+        if (testDescriptor.isComposite()) {
+            suite = testDescriptor;
+        }
+
+        // Hold on to any repro messages so we can report them immediately on test case failure
+        if (outputEvent.getMessage().startsWith(REPRODUCE_WITH_PREFIX)) {
+            Deque<String> lines = reproductionLines.computeIfAbsent(Descriptor.of(suite), d -> new LinkedList<>());
+            lines.add(outputEvent.getMessage());
+        }
+
+        EventWriter eventWriter = eventWriters.computeIfAbsent(Descriptor.of(suite), EventWriter::new);
+        eventWriter.write(outputEvent);
+    }
+
+    @Override
+    public void beforeSuite(TestDescriptor suite) {
+
+    }
+
+    @Override
+    public void afterSuite(final TestDescriptor suite, TestResult result) {
+        Descriptor descriptor = Descriptor.of(suite);
+
+        try {
+            // if the test suite failed, report all captured output
+            if (result.getResultType().equals(TestResult.ResultType.FAILURE)) {
+                EventWriter eventWriter = eventWriters.get(descriptor);
+
+                if (eventWriter != null) {
+                    // It's not explicit what the threading guarantees are for TestListener method execution so we'll
+                    // be explicitly safe here to avoid interleaving output from multiple test suites
+                    synchronized (this) {
+                        // make sure we've flushed everything to disk before reading
+                        eventWriter.flush();
+
+                        System.err.println("\n\nSuite: " + suite);
+
+                        try (BufferedReader reader = eventWriter.reader()) {
+                            PrintStream out = System.out;
+                            for (String message = reader.readLine(); message != null; message = reader.readLine()) {
+                                if (message.startsWith(" 1> ")) {
+                                    out = System.out;
+                                } else if (message.startsWith(" 2> ")) {
+                                    out = System.err;
+                                }
+
+                                out.println(message);
+                            }
+                        }
+                    }
+                }
+            }
+        } catch (IOException e) {
+            throw new UncheckedIOException("Error reading test suite output", e);
+        } finally {
+            reproductionLines.remove(descriptor);
+            EventWriter writer = eventWriters.remove(descriptor);
+            if (writer != null) {
+                try {
+                    writer.close();
+                } catch (IOException e) {
+                    LOGGER.error("Failed to close test suite output stream", e);
+                }
+            }
+        }
+    }
+
+    @Override
+    public void beforeTest(TestDescriptor testDescriptor) {
+
+    }
+
+    @Override
+    public void afterTest(TestDescriptor testDescriptor, TestResult result) {
+        if (result.getResultType() == TestResult.ResultType.FAILURE) {
+            failedTests.add(Descriptor.of(testDescriptor));
+
+            if (testDescriptor.getParent() != null) {
+                // go back and fetch the reproduction line for this test failure
+                Deque<String> lines = reproductionLines.get(Descriptor.of(testDescriptor.getParent()));
+                if (lines != null) {
+                    String line = lines.getLast();
+                    if (line != null) {
+                        System.err.print('\n' + line);
+                    }
+                }
+
+                // include test failure exception stacktraces in test suite output log
+                if (result.getExceptions().size() > 0) {
+                    String message = formatter.format(testDescriptor, result.getExceptions()).substring(4);
+                    EventWriter eventWriter = eventWriters.computeIfAbsent(Descriptor.of(testDescriptor.getParent()), EventWriter::new);
+
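+                    // Replay the formatted stacktrace into the suite's captured output as stderr: writing
+                    // it with Destination.StdErr below makes EventWriter prefix it with " 2> ", and the
+                    // substring(4) above strips the formatter's leading indent so the text lines up with
+                    // that prefix when the suite log is dumped on failure.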
+                    eventWriter.write(new TestOutputEvent() {
+                        @Override
+                        public Destination getDestination() {
+                            return Destination.StdErr;
+                        }
+
+                        @Override
+                        public String getMessage() {
+                            return message;
+                        }
+                    });
+                }
+            }
+        }
+    }
+
+    public Set<Descriptor> getFailedTests() {
+        return failedTests;
+    }
+
+    /**
+     * Class for identifying test output sources. We use this rather than Gradle's {@link TestDescriptor} as we want
+     * to avoid any nasty memory leak issues that come from keeping Gradle implementation types in memory. Since we
+     * use this as the key for our HashMap, it's best to control the implementation as there's no guarantee that Gradle's
+     * various {@link TestDescriptor} implementations reliably implement equals and hashCode.
+     */
+    public static class Descriptor {
+        private final String name;
+        private final String className;
+        private final String parent;
+
+        private Descriptor(String name, String className, String parent) {
+            this.name = name;
+            this.className = className;
+            this.parent = parent;
+        }
+
+        public static Descriptor of(TestDescriptor d) {
+            return new Descriptor(d.getName(), d.getClassName(), d.getParent() == null ? null : d.getParent().toString());
+        }
+
+        public String getClassName() {
+            return className;
+        }
+
+        public String getFullName() {
+            return className + "." + name;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Descriptor that = (Descriptor) o;
+            return Objects.equals(name, that.name) &&
+                Objects.equals(className, that.className) &&
+                Objects.equals(parent, that.parent);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(name, className, parent);
+        }
+    }
+
+    private class EventWriter implements Closeable {
+        private final File outputFile;
+        private final Writer writer;
+
+        EventWriter(Descriptor descriptor) {
+            this.outputFile = new File(outputDirectory, descriptor.getClassName() + ".out");
+
+            FileOutputStream fos;
+            try {
+                fos = new FileOutputStream(this.outputFile);
+            } catch (IOException e) {
+                throw new UncheckedIOException("Unable to create test suite output file", e);
+            }
+
+            this.writer = new PrintWriter(new BufferedOutputStream(fos));
+        }
+
+        public void write(TestOutputEvent event) {
+            String prefix;
+            if (event.getDestination() == TestOutputEvent.Destination.StdOut) {
+                prefix = " 1> ";
+            } else {
+                prefix = " 2> ";
+            }
+
+            try {
+                if (event.getMessage().equals("\n")) {
+                    writer.write(event.getMessage());
+                } else {
+                    writer.write(prefix + event.getMessage());
+                }
+            } catch (IOException e) {
+                throw new UncheckedIOException("Unable to write test suite output", e);
+            }
+        }
+
+        public void flush() throws IOException {
+            writer.flush();
+        }
+
+        public BufferedReader reader() {
+            try {
+                return new BufferedReader(new FileReader(outputFile));
+            } catch (IOException e) {
+                throw new UncheckedIOException("Unable to read test suite output file", e);
+            }
+        }
+
+        @Override
+        public void close() throws IOException {
+            writer.close();
+
+            // there's no need to keep this stuff on disk after suite execution
+            outputFile.delete();
+        }
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java
index 3d64578e014..b930955236f 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java
@@ -32,9 +32,8 @@ import
org.gradle.api.Task; import org.gradle.api.plugins.BasePlugin; import org.gradle.api.plugins.ExtraPropertiesExtension; import org.gradle.api.tasks.TaskContainer; +import org.gradle.api.tasks.testing.Test; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.util.Collections; import java.util.function.BiConsumer; @@ -103,7 +102,7 @@ public class TestFixturesPlugin implements Plugin { .matching(fixtureProject -> fixtureProject.equals(project) == false) .all(fixtureProject -> project.evaluationDependsOn(fixtureProject.getPath())); - conditionTaskByType(tasks, extension, getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask")); + conditionTaskByType(tasks, extension, Test.class); conditionTaskByType(tasks, extension, getTaskClass("org.elasticsearch.gradle.test.RestIntegTestTask")); conditionTaskByType(tasks, extension, TestingConventionsTasks.class); conditionTaskByType(tasks, extension, ComposeUp.class); @@ -116,18 +115,14 @@ public class TestFixturesPlugin implements Plugin { return; } - tasks.withType(getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask"), task -> + tasks.withType(Test.class, task -> extension.fixtures.all(fixtureProject -> { - fixtureProject.getTasks().matching(it -> it.getName().equals("buildFixture")).all(buildFixture -> - task.dependsOn(buildFixture) - ); - fixtureProject.getTasks().matching(it -> it.getName().equals("composeDown")).all(composeDown -> - task.finalizedBy(composeDown) - ); + fixtureProject.getTasks().matching(it -> it.getName().equals("buildFixture")).all(task::dependsOn); + fixtureProject.getTasks().matching(it -> it.getName().equals("composeDown")).all(task::finalizedBy); configureServiceInfoForTask( task, fixtureProject, - (name, port) -> setSystemProperty(task, name, port) + task::systemProperty ); task.dependsOn(fixtureProject.getTasks().getByName("postProcessFixture")); }) @@ -182,17 +177,6 @@ public class TestFixturesPlugin implements Plugin { return hasDockerCompose && Boolean.parseBoolean(System.getProperty("tests.fixture.enabled", "true")); } - private void setSystemProperty(Task task, String name, Object value) { - try { - Method systemProperty = task.getClass().getMethod("systemProperty", String.class, Object.class); - systemProperty.invoke(task, name, value); - } catch (NoSuchMethodException e) { - throw new IllegalArgumentException("Could not find systemProperty method on RandomizedTestingTask", e); - } catch (IllegalAccessException | InvocationTargetException e) { - throw new IllegalArgumentException("Could not call systemProperty method on RandomizedTestingTask", e); - } - } - private void disableTaskByType(TaskContainer tasks, Class type) { tasks.withType(type, task -> task.setEnabled(false)); } diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/carrotsearch.randomized-testing.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/carrotsearch.randomized-testing.properties deleted file mode 100644 index e1a1b8297c8..00000000000 --- a/buildSrc/src/main/resources/META-INF/gradle-plugins/carrotsearch.randomized-testing.properties +++ /dev/null @@ -1 +0,0 @@ -implementation-class=com.carrotsearch.gradle.junit4.RandomizedTestingPlugin diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java index 39ab8a6734c..c6e1e2783ce 100644 --- 
a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java @@ -62,7 +62,7 @@ public class TestingConventionsTasksIT extends GradleIntegrationTestCase { BuildResult result = runner.buildAndFail(); assertOutputContains(result.getOutput(), "Expected at least one test class included in task :empty_test_task:emptyTest, but found none.", - "Expected at least one test class included in task :empty_test_task:emptyTestRandomized, but found none." + "Expected at least one test class included in task :empty_test_task:test, but found none." ); } @@ -71,9 +71,8 @@ public class TestingConventionsTasksIT extends GradleIntegrationTestCase { .withArguments("clean", ":all_classes_in_tasks:testingConventions", "-i", "-s"); BuildResult result = runner.buildAndFail(); assertOutputContains(result.getOutput(), - "Test classes are not included in any enabled task (:all_classes_in_tasks:emptyTestRandomized):", - " * org.elasticsearch.gradle.testkit.NamingConventionIT", - " * org.elasticsearch.gradle.testkit.NamingConventionTests" + "Test classes are not included in any enabled task (:all_classes_in_tasks:test):", + " * org.elasticsearch.gradle.testkit.NamingConventionIT" ); } diff --git a/buildSrc/src/testKit/elasticsearch.build/build.gradle b/buildSrc/src/testKit/elasticsearch.build/build.gradle index 409367da314..8020935f67e 100644 --- a/buildSrc/src/testKit/elasticsearch.build/build.gradle +++ b/buildSrc/src/testKit/elasticsearch.build/build.gradle @@ -27,7 +27,7 @@ forbiddenApisTest.enabled = false // requires dependency on testing fw jarHell.enabled = false // we don't have tests for now -unitTest.enabled = false +test.enabled = false task hello { doFirst { diff --git a/buildSrc/src/testKit/testingConventions/build.gradle b/buildSrc/src/testKit/testingConventions/build.gradle index 00522450991..dd82a18085f 100644 --- a/buildSrc/src/testKit/testingConventions/build.gradle +++ b/buildSrc/src/testKit/testingConventions/build.gradle @@ -25,22 +25,16 @@ allprojects { baseClasses = [] } } - - unitTest.enabled = false } project(':empty_test_task') { task emptyTest(type: Test) { } - - task emptyTestRandomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { - - } } project(':all_classes_in_tasks') { - task emptyTestRandomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { + test { include "**/Convention*" } } @@ -54,14 +48,14 @@ project(':not_implementing_base') { baseClass 'org.elasticsearch.gradle.testkit.Integration' } } - task randomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { + test { include "**/*IT.class" include "**/*Tests.class" } } project(':valid_setup_no_base') { - task randomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { + test { include "**/*IT.class" include "**/*Tests.class" } @@ -72,7 +66,7 @@ project(':tests_in_main') { } project (':valid_setup_with_base') { - task randomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { + test { include "**/*IT.class" include "**/*Tests.class" } diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle index f8beeafd14e..a53f1020340 100644 --- a/client/benchmark/build.gradle +++ b/client/benchmark/build.gradle @@ -29,7 +29,7 @@ archivesBaseName = 'client-benchmarks' mainClassName = 'org.elasticsearch.client.benchmark.BenchmarkMain' // never try to invoke tests on the benchmark project - there aren't any -unitTest.enabled = false 
+test.enabled = false dependencies { compile 'org.apache.commons:commons-math3:3.2' diff --git a/client/client-benchmark-noop-api-plugin/build.gradle b/client/client-benchmark-noop-api-plugin/build.gradle index 2153ee3b725..60b21d2da0d 100644 --- a/client/client-benchmark-noop-api-plugin/build.gradle +++ b/client/client-benchmark-noop-api-plugin/build.gradle @@ -36,5 +36,5 @@ dependenciesInfo.enabled = false compileJava.options.compilerArgs << "-Xlint:-cast,-rawtypes,-unchecked" // no unit tests -unitTest.enabled = false +test.enabled = false integTest.enabled = false diff --git a/client/test/build.gradle b/client/test/build.gradle index 25cf23672da..faf5fb7bddf 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -53,4 +53,4 @@ dependenciesInfo.enabled = false //we aren't releasing this jar thirdPartyAudit.enabled = false -unitTest.enabled = false +test.enabled = false diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index d79971907b5..f44136b2565 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -26,7 +26,7 @@ integTestRunner { * when running against an external cluster. */ if (System.getProperty("tests.rest.cluster") == null) { - systemProperty 'tests.logfile', + nonInputProperties.systemProperty 'tests.logfile', "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json" } else { systemProperty 'tests.logfile', '--external--' diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle index 03ac32d20b7..c8fe9e11219 100644 --- a/distribution/tools/java-version-checker/build.gradle +++ b/distribution/tools/java-version-checker/build.gradle @@ -7,7 +7,7 @@ forbiddenApisMain { replaceSignatureFiles 'jdk-signatures' } -unitTest.enabled = false +test.enabled = false javadoc.enabled = false loggerUsageCheck.enabled = false jarHell.enabled = false diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index b5acb105407..61e3546ed89 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -35,7 +35,7 @@ dependencyLicenses { mapping from: /bc.*/, to: 'bouncycastle' } -unitTest { +test { // TODO: find a way to add permissions for the tests in this module systemProperty 'tests.security.manager', 'false' } diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index 0cd1f256c40..b1f3b338255 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -26,7 +26,7 @@ dependencies { compile "org.elasticsearch:elasticsearch-core:${version}" } -unitTest.enabled = false +test.enabled = false // Since CLI does not depend on :server, it cannot run the jarHell task jarHell.enabled = false diff --git a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index 3de0ae5117e..277698bd8cc 100644 --- a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -196,18 +196,23 @@ public class JarHell { // case for tests: where we have class files in the classpath final Path root = PathUtils.get(url.toURI()); final String sep = root.getFileSystem().getSeparator(); - Files.walkFileTree(root, new SimpleFileVisitor() { - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws 
IOException { - String entry = root.relativize(file).toString(); - if (entry.endsWith(".class")) { - // normalize with the os separator, remove '.class' - entry = entry.replace(sep, ".").substring(0, entry.length() - ".class".length()); - checkClass(clazzes, entry, path); + + // don't try and walk class or resource directories that don't exist + // gradle will add these to the classpath even if they never get created + if (Files.exists(root)) { + Files.walkFileTree(root, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + String entry = root.relativize(file).toString(); + if (entry.endsWith(".class")) { + // normalize with the os separator, remove '.class' + entry = entry.replace(sep, ".").substring(0, entry.length() - ".class".length()); + checkClass(clazzes, entry, path); + } + return super.visitFile(file, attrs); } - return super.visitFile(file, attrs); - } - }); + }); + } } } } diff --git a/libs/plugin-classloader/build.gradle b/libs/plugin-classloader/build.gradle index 4b3e00467b7..d6af6600d34 100644 --- a/libs/plugin-classloader/build.gradle +++ b/libs/plugin-classloader/build.gradle @@ -17,7 +17,7 @@ * under the License. */ -unitTest.enabled = false +test.enabled = false // test depend on ES core... forbiddenApisMain.enabled = false diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 9260f871e47..a8733fef90d 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -37,8 +37,8 @@ dependencyLicenses { mapping from: /asm-.*/, to: 'asm' } -unitTest { - jvmArg '-XX:-OmitStackTraceInFastThrow' +test { + jvmArgs '-XX:-OmitStackTraceInFastThrow' } /* Build Javadoc for the Java classes in Painless's public API that are in the diff --git a/modules/lang-painless/spi/build.gradle b/modules/lang-painless/spi/build.gradle index 92fd70411f7..7e43a242a23 100644 --- a/modules/lang-painless/spi/build.gradle +++ b/modules/lang-painless/spi/build.gradle @@ -37,4 +37,4 @@ dependencies { } // no tests...yet? -unitTest.enabled = false +test.enabled = false diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index ca2b6e474a6..400bb3d4841 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -45,7 +45,7 @@ run { setting 'reindex.remote.whitelist', '127.0.0.1:*' } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the * same JVM randomize processors and will step on each other if we allow them to @@ -148,8 +148,8 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) { systemProperty "tests.fromOld", "true" /* Use a closure on the string to delay evaluation until right before we * run the integration tests so that we can be sure that the file is - * ready. */ - systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" + * ready. */ + nonInputProperties.systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" } } } diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 7ba745b1aa0..e8050c637f1 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -46,7 +46,7 @@ dependencyLicenses { mapping from: /netty-.*/, to: 'netty' } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. 
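The build-script hunks before and after this point all apply the same migration: the RandomizedTestingTask-backed `unitTest` task becomes Gradle's built-in `test` task, `jvmArg` becomes the standard `Test.jvmArgs`, and system properties whose values only exist at execution time (cluster ports, config paths) go through the `nonInputProperties` helper that RestIntegTestTask installs on runner tasks, so they reach the test JVM without being snapshotted as task inputs. A minimal sketch of the migrated shape, assuming a module that applies these build plugins; `cluster` stands in for a registered test cluster as in the RestIntegTestTask hunk above:

test {
    jvmArgs '-XX:-OmitStackTraceInFastThrow'           // was: unitTest { jvmArg '...' }
    systemProperty 'tests.security.manager', 'false'   // stable value, safe to track as a task input
}

integTestRunner {
    // lazy GString, resolved at execution time and kept out of Gradle's input snapshot,
    // so a port that changes on every run does not invalidate up-to-date checks
    nonInputProperties.systemProperty 'tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",") }"
}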
diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index f154c3d7189..107d1ecdde3 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -64,7 +64,7 @@ task writeTestJavaPolicy { } } -unitTest { +test { dependsOn writeTestJavaPolicy // this is needed for insecure plugins, remove if possible! systemProperty 'tests.artifact', project.name diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 11d4a7e25fe..697cc3780a1 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -29,7 +29,7 @@ check { dependsOn 'qa:gce:check' } -unitTest { +test { // this is needed for insecure plugins, remove if possible! systemProperty 'tests.artifact', project.name } diff --git a/plugins/examples/custom-suggester/build.gradle b/plugins/examples/custom-suggester/build.gradle index 7c0d694e1b5..977e467391d 100644 --- a/plugins/examples/custom-suggester/build.gradle +++ b/plugins/examples/custom-suggester/build.gradle @@ -32,4 +32,4 @@ integTestCluster { } // this plugin has no unit tests, only rest tests -tasks.unitTest.enabled = false +tasks.test.enabled = false diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index d68a9ee3976..95928c472ca 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -35,4 +35,4 @@ if (System.getProperty('tests.distribution') == null) { integTestCluster.distribution = 'oss' } -unitTest.enabled = false +test.enabled = false diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index 025e570bede..98dd093ac17 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -27,7 +27,7 @@ esplugin { } // No unit tests in this example -unitTest.enabled = false +test.enabled = false task exampleFixture(type: org.elasticsearch.gradle.test.AntFixture) { dependsOn testClasses @@ -40,7 +40,7 @@ integTestCluster { dependsOn exampleFixture } integTestRunner { - systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" + nonInputProperties.systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" } testingConventions.naming { diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle index b054ab47a31..e9da62acdcf 100644 --- a/plugins/examples/script-expert-scoring/build.gradle +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -26,5 +26,5 @@ esplugin { noticeFile rootProject.file('NOTICE.txt') } -unitTest.enabled = false +test.enabled = false diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index d5addc663ca..24b03621eba 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -24,6 +24,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import java.nio.file.Files import java.nio.file.Path import java.nio.file.Paths + apply plugin: 'elasticsearch.test.fixtures' esplugin { @@ -150,9 +151,9 @@ project.afterEvaluate { Task restIntegTestTaskRunner = project.tasks.getByName("${integTestTaskName}Runner") restIntegTestTaskRunner.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}" restIntegTestTaskRunner.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}" - restIntegTestTaskRunner.jvmArg 
"-Djava.security.krb5.conf=${krb5conf}" + restIntegTestTaskRunner.jvmArgs "-Djava.security.krb5.conf=${krb5conf}" if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { - restIntegTestTaskRunner.jvmArg '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' + restIntegTestTaskRunner.jvmArgs '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' } restIntegTestTaskRunner.systemProperty ( "test.krb5.keytab.hdfs", @@ -221,7 +222,7 @@ if (fixtureSupported) { integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository' // Only include the HA integration tests for the HA test task - integTestHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class']) + integTestHaRunner.setIncludes(['**/Ha*TestSuiteIT.class']) } else { if (legalPath) { logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH") @@ -252,7 +253,7 @@ integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository // Ignore HA integration Tests. They are included below as part of integTestSecureHa test runner. integTestSecureRunner.exclude('**/Ha*TestSuiteIT.class') // Only include the HA integration tests for the HA test task -integTestSecureHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class']) +integTestSecureHaRunner.setIncludes(['**/Ha*TestSuiteIT.class']) thirdPartyAudit { ignoreMissingClasses() diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 8a2edeb78c5..d933bcef490 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -2,7 +2,6 @@ import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture import org.elasticsearch.gradle.test.RestIntegTestTask -import com.carrotsearch.gradle.junit4.RandomizedTestingTask /* * Licensed to Elasticsearch under one or more contributor @@ -65,14 +64,14 @@ bundlePlugin { } } -task testRepositoryCreds(type: RandomizedTestingTask) { +task testRepositoryCreds(type: Test) { include '**/RepositoryCredentialsTests.class' include '**/S3BlobStoreRepositoryTests.class' systemProperty 'es.allow_insecure_settings', 'true' } check.dependsOn(testRepositoryCreds) -unitTest { +test { // these are tested explicitly in separate test tasks exclude '**/*CredentialsTests.class' exclude '**/S3BlobStoreRepositoryTests.class' diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle index 3b2e21fd557..140df6e283a 100644 --- a/qa/die-with-dignity/build.gradle +++ b/qa/die-with-dignity/build.gradle @@ -27,11 +27,11 @@ esplugin { integTestRunner { systemProperty 'tests.security.manager', 'false' systemProperty 'tests.system_call_filter', 'false' - systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}" - systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.json" + nonInputProperties.systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}" + nonInputProperties.systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.json" systemProperty 'runtime.java.home', "${project.runtimeJavaHome}" } -unitTest.enabled = false +test.enabled = false check.dependsOn integTest diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 62614ca36cd..2f9239e5c2f 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -31,7 +31,7 @@ dependencies { // TODO: give each evil test its 
own fresh JVM for more isolation. -unitTest { +test { systemProperty 'tests.security.manager', 'false' } diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index a856dd1f0ec..15c44f38f7c 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -92,7 +92,7 @@ for (Version version : bwcVersions.indexCompatible) { } } -unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version task bwcTestSnapshots { diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle index 7b5682507cb..544614ef269 100644 --- a/qa/logging-config/build.gradle +++ b/qa/logging-config/build.gradle @@ -31,10 +31,10 @@ integTestCluster { } integTestRunner { - systemProperty 'tests.logfile', + nonInputProperties.systemProperty 'tests.logfile', "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log" } -unitTest { +test { systemProperty 'tests.security.manager', 'false' } diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 40ffef5e25d..91480c4e3d2 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -60,7 +60,7 @@ for (Version version : bwcVersions.wireCompatible) { } } -unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version task bwcTestSnapshots { diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 0835945499d..bca12be6754 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -57,6 +57,6 @@ task integTest { dependsOn = [mixedClusterTest] } -unitTest.enabled = false // no unit tests for multi-cluster-search, only integration tests +test.enabled = false // no unit tests for multi-cluster-search, only integration tests check.dependsOn(integTest) diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 0fa245959a8..aced5dba652 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -139,7 +139,7 @@ for (Version version : bwcVersions.wireCompatible) { } } -unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version task bwcTestSnapshots { diff --git a/qa/unconfigured-node-name/build.gradle b/qa/unconfigured-node-name/build.gradle index 5aba0562e03..3b0faa10a7e 100644 --- a/qa/unconfigured-node-name/build.gradle +++ b/qa/unconfigured-node-name/build.gradle @@ -29,6 +29,6 @@ integTestCluster { } integTestRunner { - systemProperty 'tests.logfile', + nonInputProperties.systemProperty 'tests.logfile', "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json" } diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index bd5f3e7a2ac..ac0bfe78aad 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -71,7 +71,7 @@ forbiddenApisMain { } // we don't have additional tests for the tests themselves -tasks.unitTest.enabled = false +tasks.test.enabled = false // 
Tests are destructive and meant to run in a VM, they don't adhere to general conventions testingConventions.enabled = false diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 14a6b4362f1..42c2195dd13 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -52,7 +52,7 @@ for (Version version : bwcVersions.indexCompatible) { bwcTest.dependsOn(versionBwcTest) } -unitTest.enabled = false +test.enabled = false task bwcTestSnapshots { if (project.bwc_tests_enabled) { diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index fce27e6ab8a..f9e43bd45fc 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -211,7 +211,7 @@ if (!Os.isFamily(Os.FAMILY_WINDOWS)) { check.dependsOn(integTest) -unitTest.enabled = false +test.enabled = false dependencyLicenses.enabled = false dependenciesInfo.enabled = false diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 915c3a68f6e..d95ad476682 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -2,5 +2,5 @@ apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -unitTest.enabled = false +test.enabled = false jarHell.enabled = false diff --git a/server/build.gradle b/server/build.gradle index d5ad8ac58ac..bff5353f37f 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -17,8 +17,6 @@ * under the License. */ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.optional-base' apply plugin: 'nebula.maven-base-publish' @@ -331,14 +329,15 @@ dependencyLicenses { } if (isEclipse == false || project.path == ":server-tests") { - task integTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Multi-node tests', - dependsOn: unitTest.dependsOn) { + task integTest(type: Test) { + description = 'Multi-node tests' + mustRunAfter test + include '**/*IT.class' } + check.dependsOn integTest - integTest.mustRunAfter test + task internalClusterTest { dependsOn integTest } diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index a4a33426a43..d6b15f3df43 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -56,6 +56,8 @@ grant codeBase "${codebase.randomizedtesting-runner}" { grant codeBase "${codebase.junit}" { // needed for TestClass creation permission java.lang.RuntimePermission "accessDeclaredMembers"; + // needed for test listener notifications + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; grant codeBase "${codebase.mocksocket}" { @@ -86,3 +88,20 @@ grant codeBase "${codebase.httpasyncclient}" { // rest client uses system properties which gets the default proxy permission java.net.NetPermission "getProxySelector"; }; + +grant codeBase "file:${gradle.dist.lib}/-" { + // gradle test worker code needs a slew of permissions, we give full access here since gradle isn't a production + // dependency and there's no point in exercising the security policy against it + permission java.security.AllPermission; +}; + +grant codeBase "file:${gradle.worker.jar}" { + // gradle test worker code needs a slew of permissions, we give full access here since gradle isn't a production + // dependency 
and there's no point in exercising the security policy against it + permission java.security.AllPermission; +}; + +grant { + // since the gradle test worker jar is on the test classpath, our tests should be able to read it + permission java.io.FilePermission "${gradle.worker.jar}", "read"; +}; \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index bd2dbec6649..5dab9d99915 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -23,5 +23,5 @@ dependencies { compile "org.apache.hadoop:hadoop-minicluster:2.8.1" } -unitTest.enabled = false +test.enabled = false thirdPartyAudit.enabled = false diff --git a/test/fixtures/krb5kdc-fixture/build.gradle b/test/fixtures/krb5kdc-fixture/build.gradle index 1d224942f18..9787c3527af 100644 --- a/test/fixtures/krb5kdc-fixture/build.gradle +++ b/test/fixtures/krb5kdc-fixture/build.gradle @@ -53,4 +53,4 @@ buildFixture.enabled = false project.ext.krb5Conf = { service -> file("$buildDir/shared/${service}/krb5.conf") } project.ext.krb5Keytabs = { service, fileName -> file("$buildDir/shared/${service}/keytabs/${fileName}") } -unitTest.enabled = false +test.enabled = false diff --git a/test/fixtures/old-elasticsearch/build.gradle b/test/fixtures/old-elasticsearch/build.gradle index 82948a0b3b0..5cfc02bbba3 100644 --- a/test/fixtures/old-elasticsearch/build.gradle +++ b/test/fixtures/old-elasticsearch/build.gradle @@ -24,7 +24,7 @@ a "ports" file with the port on which Elasticsearch is running. """ apply plugin: 'elasticsearch.build' -unitTest.enabled = false +test.enabled = false dependencies { // Just for the constants.... diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 36eaa3f8b29..e4a72f9da5f 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -16,7 +16,6 @@ * specific language governing permissions and limitations * under the License. 
*/ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask; dependencies { compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" @@ -64,12 +63,12 @@ thirdPartyAudit.ignoreMissingClasses ( 'org.jmock.core.Constraint' ) -unitTest { +test { systemProperty 'tests.gradle_index_compat_versions', bwcVersions.indexCompatible.join(',') systemProperty 'tests.gradle_wire_compat_versions', bwcVersions.wireCompatible.join(',') systemProperty 'tests.gradle_unreleased_versions', bwcVersions.unreleased.join(',') } -task integTest(type: RandomizedTestingTask) { +task integTest(type: Test) { include "**/*IT.class" } diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 28170c827b3..e035b779b3f 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -37,6 +37,7 @@ import org.junit.Assert; import java.io.InputStream; import java.net.SocketPermission; import java.net.URL; +import java.nio.file.Files; import java.nio.file.Path; import java.security.Permission; import java.security.Permissions; @@ -252,9 +253,12 @@ public class BootstrapForTesting { Set raw = JarHell.parseClassPath(); Set cooked = new HashSet<>(raw.size()); for (URL url : raw) { - boolean added = cooked.add(PathUtils.get(url.toURI()).toRealPath().toUri().toURL()); - if (added == false) { - throw new IllegalStateException("Duplicate in classpath after resolving symlinks: " + url); + Path path = PathUtils.get(url.toURI()); + if (Files.exists(path)) { + boolean added = cooked.add(path.toRealPath().toUri().toURL()); + if (added == false) { + throw new IllegalStateException("Duplicate in classpath after resolving symlinks: " + url); + } } } return raw; diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index 58e126b4bed..b1a4c42cbfd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -19,7 +19,6 @@ package org.elasticsearch.test.junit.listeners; import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; @@ -38,6 +37,7 @@ import java.util.TimeZone; import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_ITERATIONS; import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_PREFIX; +import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_TESTCLASS; import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_TESTMETHOD; import static org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.REST_TESTS_BLACKLIST; import static org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.REST_TESTS_SUITE; @@ -77,8 +77,14 @@ public class ReproduceInfoPrinter extends RunListener { final String gradlew = Constants.WINDOWS ? "gradlew" : "./gradlew"; final StringBuilder b = new StringBuilder("REPRODUCE WITH: " + gradlew + " "); String task = System.getProperty("tests.task"); - // TODO: enforce (intellij still runs the runner?) 
or use default "test" but that won't work for integ + + // append Gradle test runner test filter string b.append(task); + b.append(" --tests \""); + b.append(failure.getDescription().getClassName()); + b.append("."); + b.append(failure.getDescription().getMethodName()); + b.append("\""); GradleMessageBuilder gradleMessageBuilder = new GradleMessageBuilder(b); gradleMessageBuilder.appendAllOpts(failure.getDescription()); @@ -106,11 +112,6 @@ public class ReproduceInfoPrinter extends RunListener { public ReproduceErrorMessageBuilder appendAllOpts(Description description) { super.appendAllOpts(description); - if (description.getMethodName() != null) { - //prints out the raw method description instead of methodName(description) which filters out the parameters - super.appendOpt(SYSPROP_TESTMETHOD(), "\"" + description.getMethodName() + "\""); - } - return appendESProperties(); } @@ -128,6 +129,11 @@ public class ReproduceInfoPrinter extends RunListener { if (sysPropName.equals(SYSPROP_ITERATIONS())) { // we don't want the iters to be in there! return this; } + if (sysPropName.equals(SYSPROP_TESTCLASS())) { + //don't print out the test class, we print it ourselves in appendAllOpts + //without filtering out the parameters (needed for REST tests) + return this; + } if (sysPropName.equals(SYSPROP_TESTMETHOD())) { //don't print out the test method, we print it ourselves in appendAllOpts //without filtering out the parameters (needed for REST tests) @@ -143,7 +149,7 @@ public class ReproduceInfoPrinter extends RunListener { return this; } - public ReproduceErrorMessageBuilder appendESProperties() { + private ReproduceErrorMessageBuilder appendESProperties() { appendProperties("tests.es.logger.level"); if (inVerifyPhase()) { // these properties only make sense for integration tests diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index c3eb5b66e6a..dbe1cdf51ef 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -1,5 +1,3 @@ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask - evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' @@ -18,28 +16,27 @@ integTest.enabled = false // Integration Test classes that cannot run with the security manager String[] noSecurityManagerITClasses = [ "**/CloseFollowerIndexIT.class" ] -// Instead we create a separate task to run the -// tests based on ESIntegTestCase -task internalClusterTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Java fantasy integration tests', - dependsOn: unitTest.dependsOn) { - include '**/*IT.class' - exclude noSecurityManagerITClasses - systemProperty 'es.set.netty.runtime.available.processors', 'false' -} -check.dependsOn internalClusterTest -internalClusterTest.mustRunAfter test - -task internalClusterTestNoSecurityManager(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Java fantasy integration tests with no security manager', - dependsOn: unitTest.dependsOn) { +task internalClusterTestNoSecurityManager(type: Test) { + description = 'Java fantasy integration tests with no security manager' + include noSecurityManagerITClasses systemProperty 'es.set.netty.runtime.available.processors', 'false' systemProperty 'tests.security.manager', 'false' } -internalClusterTest.dependsOn internalClusterTestNoSecurityManager + +// Instead we create a separate task to run the +// tests based on ESIntegTestCase +task internalClusterTest(type: Test) { + description = 'Java 
fantasy integration tests' + dependsOn internalClusterTestNoSecurityManager + mustRunAfter test + + include '**/*IT.class' + exclude noSecurityManagerITClasses + systemProperty 'es.set.netty.runtime.available.processors', 'false' +} + +check.dependsOn internalClusterTest // add all sub-projects of the qa sub-project gradle.projectsEvaluated { diff --git a/x-pack/plugin/ccr/qa/build.gradle b/x-pack/plugin/ccr/qa/build.gradle index a70f1cbd0a7..d3e95d997c3 100644 --- a/x-pack/plugin/ccr/qa/build.gradle +++ b/x-pack/plugin/ccr/qa/build.gradle @@ -1,7 +1,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.build' -unitTest.enabled = false +test.enabled = false dependencies { compile project(':test:framework') diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index bba9709087a..cbf30b54d5f 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -53,11 +53,11 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/" + + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/" + "${-> followClusterTest.getNodes().get(0).clusterName}_server.json" finalizedBy 'leaderClusterTestCluster#stop' } check.dependsOn followClusterTest -unitTest.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index 5680eb41f38..7c9c581c5be 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -36,7 +36,7 @@ middleClusterTestCluster { middleClusterTestRunner { systemProperty 'tests.target_cluster', 'middle' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" } task followClusterTest(type: RestIntegTestTask) {} @@ -54,11 +54,11 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.middle_host', "${-> middleClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.middle_host', "${-> middleClusterTest.nodes.get(0).httpUri()}" finalizedBy 'leaderClusterTestCluster#stop' finalizedBy 'middleClusterTestCluster#stop' } check.dependsOn followClusterTest -unitTest.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle 
b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle index d3044f760fa..6d294c40755 100644 --- a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle +++ b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle @@ -33,9 +33,9 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" finalizedBy 'leaderClusterTestCluster#stop' } check.dependsOn followClusterTest -unitTest.enabled = false +test.enabled = false diff --git a/x-pack/plugin/ccr/qa/rest/build.gradle b/x-pack/plugin/ccr/qa/rest/build.gradle index c890064504b..b06535a17c0 100644 --- a/x-pack/plugin/ccr/qa/rest/build.gradle +++ b/x-pack/plugin/ccr/qa/rest/build.gradle @@ -37,4 +37,4 @@ restTestCluster { } check.dependsOn restTest -unitTest.enabled = false +test.enabled = false diff --git a/x-pack/plugin/ccr/qa/restart/build.gradle b/x-pack/plugin/ccr/qa/restart/build.gradle index 59082d78195..8501de714fa 100644 --- a/x-pack/plugin/ccr/qa/restart/build.gradle +++ b/x-pack/plugin/ccr/qa/restart/build.gradle @@ -35,7 +35,7 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" } task followClusterRestartTest(type: RestIntegTestTask) {} @@ -53,9 +53,9 @@ followClusterRestartTestCluster { followClusterRestartTestRunner { systemProperty 'tests.target_cluster', 'follow-restart' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" finalizedBy 'leaderClusterTestCluster#stop' } check.dependsOn followClusterRestartTest -unitTest.enabled = false +test.enabled = false diff --git a/x-pack/plugin/ccr/qa/security/build.gradle b/x-pack/plugin/ccr/qa/security/build.gradle index e1a735e0b26..0e082f51d71 100644 --- a/x-pack/plugin/ccr/qa/security/build.gradle +++ b/x-pack/plugin/ccr/qa/security/build.gradle @@ -68,9 +68,9 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" finalizedBy 'leaderClusterTestCluster#stop' } check.dependsOn followClusterTest -unitTest.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index 8828a71b06b..337bd26f37d 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -2,7 +2,6 @@ import org.elasticsearch.gradle.MavenFilteringHack import java.nio.file.Files import java.nio.file.Paths -import com.carrotsearch.gradle.junit4.RandomizedTestingTask; apply plugin: 'elasticsearch.esplugin' apply plugin: 'nebula.maven-base-publish' @@ -102,7 +101,7 @@ sourceSets.test.resources { srcDir 'src/main/config' } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize 
processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. @@ -142,6 +141,6 @@ thirdPartyAudit.ignoreMissingClasses ( integTest.enabled = false // There are some integ tests that don't require a cluster, we still want to run those -task internalClusterTest(type: RandomizedTestingTask) { +task internalClusterTest(type: Test) { include "**/*IT.class" } diff --git a/x-pack/plugin/ilm/qa/build.gradle b/x-pack/plugin/ilm/qa/build.gradle index f2f60527ec4..74412a094b4 100644 --- a/x-pack/plugin/ilm/qa/build.gradle +++ b/x-pack/plugin/ilm/qa/build.gradle @@ -1,7 +1,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.build' -unitTest.enabled = false +test.enabled = false dependencies { compile project(':test:framework') diff --git a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle index d4c24d0195e..76dbf676d73 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle @@ -50,12 +50,12 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" /* To support taking index snapshots, we have to set path.repo setting */ systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") finalizedBy 'leaderClusterTestCluster#stop' } check.dependsOn followClusterTest -unitTest.enabled = false // no unit tests for this module, only the rest integration test +test.enabled = false // no unit tests for this module, only the rest integration test diff --git a/x-pack/plugin/ilm/qa/rest/build.gradle b/x-pack/plugin/ilm/qa/rest/build.gradle index 7a79d1c20d9..c69a3dfce21 100644 --- a/x-pack/plugin/ilm/qa/rest/build.gradle +++ b/x-pack/plugin/ilm/qa/rest/build.gradle @@ -42,4 +42,4 @@ restTestCluster { } check.dependsOn restTest -unitTest.enabled = false +test.enabled = false diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index 0fe0af236f9..6ca1a44c145 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -1,5 +1,3 @@ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask - evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' @@ -94,12 +92,11 @@ integTest.enabled = false // Instead we create a separate task to run the // tests based on ESIntegTestCase -task internalClusterTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Multi-node tests', - dependsOn: unitTest.dependsOn) { - include '**/*IT.class' - systemProperty 'es.set.netty.runtime.available.processors', 'false' +task internalClusterTest(type: Test) { + description = 'Multi-node tests' + + include '**/*IT.class' + systemProperty 'es.set.netty.runtime.available.processors', 'false' } check.dependsOn internalClusterTest internalClusterTest.mustRunAfter test diff --git a/x-pack/plugin/monitoring/build.gradle b/x-pack/plugin/monitoring/build.gradle index d3abb3fb2e7..5d17b81e2fa 100644 --- a/x-pack/plugin/monitoring/build.gradle +++ 
b/x-pack/plugin/monitoring/build.gradle
@@ -1,5 +1,3 @@
-import com.carrotsearch.gradle.junit4.RandomizedTestingTask
-
 evaluationDependsOn(xpackModule('core'))

 apply plugin: 'elasticsearch.esplugin'
@@ -58,15 +56,15 @@ integTest.enabled = false

 // Instead we create a separate task to run the
 // tests based on ESIntegTestCase
-task internalClusterTest(type: RandomizedTestingTask,
- group: JavaBasePlugin.VERIFICATION_GROUP,
- description: 'Multi-node tests',
- dependsOn: unitTest.dependsOn) {
- include '**/*IT.class'
- systemProperty 'es.set.netty.runtime.available.processors', 'false'
+task internalClusterTest(type: Test) {
+ description = 'Multi-node tests'
+ mustRunAfter test
+
+ include '**/*IT.class'
+ systemProperty 'es.set.netty.runtime.available.processors', 'false'
 }
+
 check.dependsOn internalClusterTest
-internalClusterTest.mustRunAfter test

 // also add an "alias" task to make typing on the command line easier
 task icTest {
diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle
index fb089afb1ba..067bb218e7e 100644
--- a/x-pack/plugin/security/build.gradle
+++ b/x-pack/plugin/security/build.gradle
@@ -288,7 +288,7 @@ run {
 plugin xpackModule('core')
 }

-unitTest {
+test {
 /*
 * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each
 * other if we allow them to set the number of available processors as it's set-once in Netty.
diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle
index 1a829704d40..19a8d11dd6f 100644
--- a/x-pack/plugin/security/cli/build.gradle
+++ b/x-pack/plugin/security/cli/build.gradle
@@ -23,7 +23,7 @@ dependencyLicenses {
 }

 if (project.inFipsJvm) {
- unitTest.enabled = false
+ test.enabled = false
 testingConventions.enabled = false
 // Forbidden APIs non-portable checks fail because bouncy castle classes are being used from the FIPS JDK since those are
 // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS.
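The `es.set.netty.runtime.available.processors=false` property that these internalClusterTest tasks keep setting exists because, as the build-script comments above explain, Netty's available-processors value is set-once per JVM. A minimal sketch of the constraint, assuming Netty's `NettyRuntime` utility (which is not part of this patch):

    import io.netty.util.NettyRuntime;

    // Illustration only: the first caller pins the value for the lifetime of the JVM.
    NettyRuntime.setAvailableProcessors(4);
    assert NettyRuntime.availableProcessors() == 4;
    // Any later attempt to change it is rejected, so randomized tests sharing a
    // JVM must either agree on a value or, as here, skip setting it altogether.
    NettyRuntime.setAvailableProcessors(8); // throws IllegalStateException

With the property set to false, the Elasticsearch Netty integration skips the `setAvailableProcessors` call in test JVMs.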
diff --git a/x-pack/plugin/security/qa/build.gradle b/x-pack/plugin/security/qa/build.gradle index f2f60527ec4..74412a094b4 100644 --- a/x-pack/plugin/security/qa/build.gradle +++ b/x-pack/plugin/security/qa/build.gradle @@ -1,7 +1,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.build' -unitTest.enabled = false +test.enabled = false dependencies { compile project(':test:framework') diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index 31cec8bbee3..c4719aef04a 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -1,5 +1,3 @@ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask - evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' @@ -30,14 +28,13 @@ archivesBaseName = 'x-pack-sql' // All integration tests live in qa modules integTest.enabled = false -task internalClusterTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - dependsOn: unitTest.dependsOn) { +task internalClusterTest(type: Test) { + mustRunAfter test include '**/*IT.class' systemProperty 'es.set.netty.runtime.available.processors', 'false' } + check.dependsOn internalClusterTest -internalClusterTest.mustRunAfter test dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index bec79dabb14..3c7eb6b804b 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -37,7 +37,7 @@ dependencyLicenses { ignoreSha 'elasticsearch' } -unitTest { +test { // don't use the shaded jar for tests classpath += project.tasks.compileJava.outputs.files classpath -= project.tasks.shadowJar.outputs.files diff --git a/x-pack/plugin/sql/qa/build.gradle b/x-pack/plugin/sql/qa/build.gradle index cf0a0dba8ee..4c9fa6de030 100644 --- a/x-pack/plugin/sql/qa/build.gradle +++ b/x-pack/plugin/sql/qa/build.gradle @@ -30,7 +30,7 @@ dependencies { /* disable unit tests because these are all integration tests used * other qa projects. 
*/ -unitTest.enabled = false +test.enabled = false dependencyLicenses.enabled = false dependenciesInfo.enabled = false diff --git a/x-pack/plugin/sql/qa/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle index 79ebff00854..525acaf99d6 100644 --- a/x-pack/plugin/sql/qa/security/build.gradle +++ b/x-pack/plugin/sql/qa/security/build.gradle @@ -44,9 +44,9 @@ subprojects { integTestRunner { def today = new Date().format('yyyy-MM-dd') - systemProperty 'tests.audit.logfile', + nonInputProperties.systemProperty 'tests.audit.logfile', "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_audit.json" - systemProperty 'tests.audit.yesterday.logfile', + nonInputProperties.systemProperty 'tests.audit.yesterday.logfile', "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_audit-${today}.json" } diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index e519062e14f..6f57ea279c5 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -106,7 +106,7 @@ task runcli { } // Use the jar for testing so we can get the proper version information -unitTest { +test { classpath -= compileJava.outputs.files classpath -= configurations.compile classpath -= configurations.runtime diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index 3efa13fc320..5719f03fc6a 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -115,7 +115,7 @@ run { plugin xpackModule('core') } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. 
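The recurring `systemProperty` to `nonInputProperties.systemProperty` switch in these runner blocks keeps late-bound, per-run values (node home directories, cluster names, HTTP URIs) out of Gradle's task-input snapshot; otherwise every run would look like a changed input and defeat up-to-date checks and the build cache. A rough sketch of the distinction, assuming the `nonInputProperties` helper provided by the Elasticsearch build plugins:

    integTestRunner {
        // Stable value: fine to register as a regular task input.
        systemProperty 'tests.target_cluster', 'follow'

        // Per-run value: still passed to the test JVM, but excluded from
        // the up-to-date/build-cache key via nonInputProperties.
        nonInputProperties.systemProperty 'tests.leader_host',
                "${-> leaderClusterTest.nodes.get(0).httpUri()}"
    }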
diff --git a/x-pack/qa/evil-tests/build.gradle b/x-pack/qa/evil-tests/build.gradle index ad32645b364..d411909fb31 100644 --- a/x-pack/qa/evil-tests/build.gradle +++ b/x-pack/qa/evil-tests/build.gradle @@ -5,7 +5,7 @@ dependencies { testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } -unitTest { +test { systemProperty 'tests.security.manager', 'false' include '**/*Tests.class' } diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index da06c6ac5ef..964cc2fb43c 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -7,7 +7,7 @@ import java.nio.charset.StandardCharsets // Apply the java plugin to this project so the sources can be edited in an IDE apply plugin: 'elasticsearch.standalone-test' -unitTest.enabled = false +test.enabled = false dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here diff --git a/x-pack/qa/multi-cluster-search-security/build.gradle b/x-pack/qa/multi-cluster-search-security/build.gradle index 63265b6949f..c31b2c0ad1d 100644 --- a/x-pack/qa/multi-cluster-search-security/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/build.gradle @@ -75,5 +75,5 @@ task integTest { dependsOn = [mixedClusterTest] } -unitTest.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test check.dependsOn(integTest) diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index 25d4da0b2a9..770dfffce7c 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -122,7 +122,7 @@ for (Version version : bwcVersions.wireCompatible) { } } -unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version task bwcTestSnapshots { diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle index 0df8740424d..2715b4e7024 100644 --- a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle +++ b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle @@ -45,8 +45,8 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.upgrade_state', 'none' systemProperty 'tests.rest.cluster_name', 'leader' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" } // ============================================================================================ @@ -76,11 +76,11 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.upgrade_state', 'none' systemProperty 'tests.rest.cluster_name', 'follower' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" + 
nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" - systemProperty 'tests.follower_host', "${-> followerClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.follower_remote_cluster_seed', "${-> followerClusterTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerClusterTest.nodes.get(0).transportUri()}" } // ============================================================================================ @@ -121,11 +121,11 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.upgrade_state', 'one_third' systemProperty 'tests.rest.cluster_name', 'follower' - systemProperty 'tests.follower_host', "${-> followerClusterTest.nodes.get(1).httpUri()}" - systemProperty 'tests.follower_remote_cluster_seed', "${-> followerClusterTest.nodes.get(1).transportUri()}" + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerClusterTest.nodes.get(1).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerClusterTest.nodes.get(1).transportUri()}" - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" finalizedBy "${taskPrefix}#follower#clusterTestCluster#node1.stop" } @@ -142,11 +142,11 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.upgrade_state', 'two_third' systemProperty 'tests.rest.cluster_name', 'follower' - systemProperty 'tests.follower_host', "${-> followerClusterTest.nodes.get(2).httpUri()}" - systemProperty 'tests.follower_remote_cluster_seed', "${-> followerClusterTest.nodes.get(2).transportUri()}" + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerClusterTest.nodes.get(2).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerClusterTest.nodes.get(2).transportUri()}" - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" finalizedBy "${taskPrefix}#follower#clusterTestCluster#node2.stop" } @@ -163,11 +163,11 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.upgrade_state', 'all' systemProperty 'tests.rest.cluster_name', 'follower' - systemProperty 'tests.follower_host', "${-> followerOneThirdUpgradedTest.nodes.get(0).httpUri()}" - systemProperty 'tests.follower_remote_cluster_seed', "${-> followerOneThirdUpgradedTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerOneThirdUpgradedTest.nodes.get(0).httpUri()}" + 
nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerOneThirdUpgradedTest.nodes.get(0).transportUri()}" - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" // This is needed, otherwise leader node 0 will stop after the leaderClusterTestRunner task has run. // Here it is ok to stop, because in the next task, the leader node 0 gets upgraded. @@ -190,11 +190,11 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.upgrade_state', 'one_third' systemProperty 'tests.rest.cluster_name', 'leader' - systemProperty 'tests.follower_host', "${-> followerUpgradedClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.follower_remote_cluster_seed', "${-> followerUpgradedClusterTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerUpgradedClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerUpgradedClusterTest.nodes.get(0).transportUri()}" - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(2).httpUri()}" - systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(2).transportUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(2).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(2).transportUri()}" finalizedBy "${taskPrefix}#leader#clusterTestCluster#node1.stop" } @@ -211,11 +211,11 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.upgrade_state', 'two_third' systemProperty 'tests.rest.cluster_name', 'leader' - systemProperty 'tests.follower_host', "${-> followerUpgradedClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.follower_remote_cluster_seed', "${-> followerUpgradedClusterTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerUpgradedClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerUpgradedClusterTest.nodes.get(0).transportUri()}" - systemProperty 'tests.leader_host', "${-> leaderOneThirdUpgradedTest.nodes.get(0).httpUri()}" - systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderOneThirdUpgradedTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderOneThirdUpgradedTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderOneThirdUpgradedTest.nodes.get(0).transportUri()}" finalizedBy "${taskPrefix}#leader#clusterTestCluster#node2.stop" } @@ -232,11 +232,11 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.upgrade_state', 'all' systemProperty 'tests.rest.cluster_name', 'leader' - systemProperty 'tests.follower_host', "${-> followerUpgradedClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.follower_remote_cluster_seed', "${-> followerUpgradedClusterTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.follower_host', "${-> 
followerUpgradedClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerUpgradedClusterTest.nodes.get(0).transportUri()}" - systemProperty 'tests.leader_host', "${-> leaderTwoThirdsUpgradedTest.nodes.get(0).httpUri()}" - systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderTwoThirdsUpgradedTest.nodes.get(0).transportUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderTwoThirdsUpgradedTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderTwoThirdsUpgradedTest.nodes.get(0).transportUri()}" /* * Force stopping all the upgraded nodes after the test runner @@ -257,7 +257,7 @@ for (Version version : bwcVersions.wireCompatible) { } } -unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version task integTest { diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index c633a824996..ba9e1ae2ff9 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -7,7 +7,7 @@ import java.nio.charset.StandardCharsets // Apply the java plugin to this project so the sources can be edited in an IDE apply plugin: 'elasticsearch.standalone-test' -unitTest.enabled = false +test.enabled = false dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here diff --git a/x-pack/qa/third-party/active-directory/build.gradle b/x-pack/qa/third-party/active-directory/build.gradle index e0c1076bdd7..2d4af2b46bb 100644 --- a/x-pack/qa/third-party/active-directory/build.gradle +++ b/x-pack/qa/third-party/active-directory/build.gradle @@ -22,7 +22,7 @@ forbiddenPatterns { exclude '**/*.der' } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. diff --git a/x-pack/test/idp-fixture/build.gradle b/x-pack/test/idp-fixture/build.gradle index 93f5b14ad23..c55123e08d0 100644 --- a/x-pack/test/idp-fixture/build.gradle +++ b/x-pack/test/idp-fixture/build.gradle @@ -1,4 +1,4 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.test.fixtures' -unitTest.enabled = false \ No newline at end of file +test.enabled = false \ No newline at end of file diff --git a/x-pack/test/smb-fixture/build.gradle b/x-pack/test/smb-fixture/build.gradle index 846c3882987..5b2161de290 100644 --- a/x-pack/test/smb-fixture/build.gradle +++ b/x-pack/test/smb-fixture/build.gradle @@ -1,4 +1,4 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.test.fixtures' -unitTest.enabled = false +test.enabled = false From a0fc2461d783863be2586ac64f775c7b85ef11e3 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 9 Apr 2019 11:33:18 -0700 Subject: [PATCH 12/60] Mute DedicatedClusterSnapshotRestoreIT#testSnapshotWithStuckNode as we await a fix. 
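The mutes in this and the next few commits all use the same mechanism: the `@AwaitsFix` annotation that `ESTestCase` inherits from Lucene's test framework, whose mandatory `bugUrl` element links the tracking issue, and which makes the randomized runner report the method as skipped rather than run it. A hypothetical example of the pattern (the issue URL below is a placeholder, not a real issue):

    public class SomeFlakyIT extends ESIntegTestCase {

        @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/00000")
        public void testSometimesFails() throws Exception {
            // body left untouched; the test is re-enabled later simply by
            // deleting the annotation once the linked issue is fixed
        }
    }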
--- .../snapshots/DedicatedClusterSnapshotRestoreIT.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index b90bc39709c..e1f75bb0a81 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -420,6 +420,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
 logger.info("--> done");
 }

+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39852")
 public void testSnapshotWithStuckNode() throws Exception {
 logger.info("--> start 2 nodes");
 ArrayList<String> nodes = new ArrayList<>();

From a417905098edc3a500b2c45b64a73993faea7620 Mon Sep 17 00:00:00 2001
From: Julie Tibshirani
Date: Tue, 9 Apr 2019 13:34:58 -0700
Subject: [PATCH 13/60] Mute RareClusterStateIT#testDelayedMappingPropagationOnPrimary
 as we await a fix. Tracked in #41030.

---
 .../elasticsearch/cluster/coordination/RareClusterStateIT.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
index cb5213ee99e..4c0168c6e3a 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
@@ -181,6 +181,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
 assertHitCount(client().prepareSearch("test").get(), 0);
 }

+ @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/41030")
 public void testDelayedMappingPropagationOnPrimary() throws Exception {
 // Here we want to test that things go well if there is a first request
 // that adds mappings but before mappings are propagated to all nodes

From cbae617898b9ef041f9be53516e6b0aa0c788f65 Mon Sep 17 00:00:00 2001
From: Julie Tibshirani
Date: Tue, 9 Apr 2019 14:48:10 -0700
Subject: [PATCH 14/60] Mute IndexFollowingIT#testFollowIndex as we await a fix.
 Tracked in #41037.
--- .../test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 52202982701..f729162c545 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -114,6 +114,7 @@ import static org.hamcrest.Matchers.nullValue; public class IndexFollowingIT extends CcrIntegTestCase { + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41037") public void testFollowIndex() throws Exception { final int numberOfPrimaryShards = randomIntBetween(1, 3); int numberOfReplicas = between(0, 1); From 2e2e11f7b4db5bf01a606a5099fcad2621a07a78 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 9 Apr 2019 18:18:26 -0400 Subject: [PATCH 15/60] [DOCS] Fix broken links for 7.0 release (#41036) * [DOCS] Remove references to deprecated 'zen.discovery.minimum_master_nodes' setting --- docs/resiliency/index.asciidoc | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc index 27aa620a34f..71a87ef57b4 100644 --- a/docs/resiliency/index.asciidoc +++ b/docs/resiliency/index.asciidoc @@ -279,24 +279,22 @@ shard will be allocated upon reopening the index. [float] === Use two phase commit for Cluster State publishing (STATUS: DONE, v5.0.0) -A master node in Elasticsearch continuously https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html#fault-detection[monitors the cluster nodes] +A master node in Elasticsearch continuously https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-fault-detection.html[monitors the cluster nodes] and removes any node from the cluster that doesn't respond to its pings in a timely -fashion. If the master is left with fewer nodes than the `discovery.zen.minimum_master_nodes` -settings, it will step down and a new master election will start. +fashion. If the master is left with too few nodes, it will step down and a new master election will start. When a network partition causes a master node to lose many followers, there is a short window in time until the node loss is detected and the master steps down. During that window, the master may erroneously accept and acknowledge cluster state changes. To avoid this, we introduce a new phase to cluster state publishing where the proposed cluster state is sent to all nodes -but is not yet committed. Only once enough nodes (`discovery.zen.minimum_master_nodes`) actively acknowledge +but is not yet committed. Only once enough nodes actively acknowledge the change, it is committed and commit messages are sent to the nodes. See {GIT}13062[#13062]. [float] === Wait on incoming joins before electing local node as master (STATUS: DONE, v2.0.0) During master election each node pings in order to discover other nodes and validate the liveness of existing -nodes. Based on this information the node either discovers an existing master or, if enough nodes are found -(see https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html#master-election[`discovery.zen.minimum_master_nodes`]) a new master will be elected. Currently, the node that is +nodes. 
Based on this information the node either discovers an existing master or, if enough nodes are found a new master will be elected. Currently, the node that is elected as master will update the cluster state to indicate the result of the election. Other nodes will submit a join request to the newly elected master node. Instead of immediately processing the election result, the elected master node should wait for the incoming joins from other nodes, thus validating that the result of the election is properly applied. As soon as enough From 21c5d7e95fe0c4b4f45190ee7522cdfdb7f2df53 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 9 Apr 2019 15:46:53 -0700 Subject: [PATCH 16/60] Mute CcrRetentionLeaseIT#testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes. Tracked in #39331. --- .../java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index dceb2db9b54..32322d861cd 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -265,6 +265,7 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39331") public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws Exception { final String leaderIndex = "leader"; final int numberOfShards = randomIntBetween(1, 3); From 0702c721514313fcac19438f2376e21e2faeb470 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 9 Apr 2019 16:27:24 -0700 Subject: [PATCH 17/60] Mute DataFrameGetAndGetStatsIT#testGetPersistedStatsWithoutTask. Tracked in #40963. --- .../xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index 72176933b2a..202416a1b85 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -105,6 +105,7 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { assertEquals(1, XContentMapValues.extractValue("count", transforms)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40963") @SuppressWarnings("unchecked") public void testGetPersistedStatsWithoutTask() throws Exception { createPivotReviewsTransform("pivot_stats_1", "pivot_reviews_stats_1", null); From 0d5f86a001dc88e38a2b824da998472ecc627d34 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 9 Apr 2019 17:09:08 -0700 Subject: [PATCH 18/60] Mute RollupIndexerStateTests#testIndexing. Tracked in #41046. 
--- .../elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
index b529a87027e..23838a59873 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
@@ -256,6 +256,7 @@ public class RollupIndexerStateTests extends ESTestCase {
 }
 }

+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41046")
 public void testIndexing() throws Exception {
 RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
 AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED);

From c0f715b3374e9f22b61661d3af215c69b22fb069 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 9 Apr 2019 20:21:34 -0400
Subject: [PATCH 19/60] Fix Docker build when sourced from artifacts

This commit fixes an issue when the artifact used to build the Docker
image is sourced from artifacts.elastic.co. In particular, the artifact
was not downloaded to the proper location.

---
 distribution/docker/build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle
index 4492be02d7c..03556edb7c5 100644
--- a/distribution/docker/build.gradle
+++ b/distribution/docker/build.gradle
@@ -23,7 +23,7 @@ ext.expansions = { oss ->
 return [
 'elasticsearch' : elasticsearch,
 'license' : oss ? 'Apache-2.0' : 'Elastic License',
- 'source_elasticsearch': local() ? "COPY $elasticsearch /opt/" : "RUN curl --retry 8 -s -L -O https://artifacts.elastic.co/downloads/elasticsearch/${elasticsearch}",
+ 'source_elasticsearch': local() ? "COPY $elasticsearch /opt/" : "RUN cd /opt && curl --retry 8 -s -L -O https://artifacts.elastic.co/downloads/elasticsearch/${elasticsearch} && cd -",
 'version' : VersionProperties.elasticsearch
 ]
 }

From d38214060e204ac18dac556c71294386e05d9173 Mon Sep 17 00:00:00 2001
From: Julie Tibshirani
Date: Tue, 9 Apr 2019 17:29:54 -0700
Subject: [PATCH 20/60] Mute ClusterDisruptionIT#testCannotJoinIfMasterLostDataFolder.
 Tracked in #41047.

---
 .../java/org/elasticsearch/discovery/ClusterDisruptionIT.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
index acec5a583f5..f60e108c34c 100644
--- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
+++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
@@ -386,6 +386,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
 }
 }

+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41047")
 public void testCannotJoinIfMasterLostDataFolder() throws Exception {
 String masterNode = internalCluster().startMasterOnlyNode();
 String dataNode = internalCluster().startDataOnlyNode();

From 3aae98f92213d0e098f90d0ffba4403e94258465 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 9 Apr 2019 22:57:35 -0400
Subject: [PATCH 21/60] Add debug logging for leases sync on recovery test

This commit adds some debug logging for a retention leases sync on
recovery test.
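Concretely, the debugging aid is twofold: `logger.info` calls marking each phase of the test, and a `@TestLogging` annotation (from the test framework, visible in the diff below) that raises the named logger to TRACE for the duration of this one test method and restores the previous level afterwards:

    @TestLogging(value = "org.elasticsearch.indices.recovery:trace")
    public void testRetentionLeasesSyncOnRecovery() throws Exception {
        // trace-level recovery logging is active only while this test runs
    }

The annotation value is a comma-separated list of `logger:level` pairs, so the scope of the extra output stays narrow.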
--- .../index/seqno/RetentionLeaseIT.java | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index 4839844781e..92d31e305ad 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -36,6 +36,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -355,6 +356,7 @@ public class RetentionLeaseIT extends ESIntegTestCase { assertFalse("retention leases background sync must be a noop if soft deletes is disabled", backgroundSyncRequestSent.get()); } + @TestLogging(value = "org.elasticsearch.indices.recovery:trace") public void testRetentionLeasesSyncOnRecovery() throws Exception { final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); @@ -382,6 +384,7 @@ public class RetentionLeaseIT extends ESIntegTestCase { .getShardOrNull(new ShardId(resolveIndex("index"), 0)); final int length = randomIntBetween(1, 8); final Map currentRetentionLeases = new LinkedHashMap<>(); + logger.info("adding retention [{}}] leases", length); for (int i = 0; i < length; i++) { final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); @@ -392,19 +395,23 @@ public class RetentionLeaseIT extends ESIntegTestCase { latch.await(); currentRetentionLeases.put(id, primary.renewRetentionLease(id, retainingSequenceNumber, source)); } + logger.info("finished adding [{}] retention leases", length); - // Cause some recoveries to fail to ensure that retention leases are handled properly when retrying a recovery - assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() - .put(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), "100ms"))); + // cause some recoveries to fail to ensure that retention leases are handled properly when retrying a recovery + assertAcked(client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().put(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), TimeValue.timeValueMillis(100)))); final Semaphore recoveriesToDisrupt = new Semaphore(scaledRandomIntBetween(0, 4)); - final MockTransportService primaryTransportService - = (MockTransportService) internalCluster().getInstance(TransportService.class, primaryShardNodeName); + final MockTransportService primaryTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, primaryShardNodeName); primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> { if (action.equals(PeerRecoveryTargetService.Actions.FINALIZE) && recoveriesToDisrupt.tryAcquire()) { if (randomBoolean()) { // return a ConnectTransportException to the START_RECOVERY action - final TransportService replicaTransportService - = 
internalCluster().getInstance(TransportService.class, connection.getNode().getName()); + final TransportService replicaTransportService = + internalCluster().getInstance(TransportService.class, connection.getNode().getName()); final DiscoveryNode primaryNode = primaryTransportService.getLocalNode(); replicaTransportService.disconnectFromNode(primaryNode); replicaTransportService.connectToNode(primaryNode); @@ -416,6 +423,7 @@ public class RetentionLeaseIT extends ESIntegTestCase { connection.sendRequest(requestId, action, request, options); }); + logger.info("allow [{}] replicas to allocate", numberOfReplicas); // now allow the replicas to be allocated and wait for recovery to finalize allowNodes("index", 1 + numberOfReplicas); ensureGreen("index"); From bb6f060f744b8eb42b4cf44ae9454ad580b6a551 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 9 Apr 2019 23:32:37 -0400 Subject: [PATCH 22/60] Add log message to forget follower test This commit adds a log message to help debug failures in a forget follower test. --- .../java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index 32322d861cd..bfa142a64e7 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -265,7 +266,6 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39331") public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws Exception { final String leaderIndex = "leader"; final int numberOfShards = randomIntBetween(1, 3); @@ -950,6 +950,7 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { "leader_cluster", leaderIndex)).actionGet(); + logger.info(Strings.toString(forgetFollowerResponse)); assertThat(forgetFollowerResponse.getTotalShards(), equalTo(numberOfShards)); assertThat(forgetFollowerResponse.getSuccessfulShards(), equalTo(numberOfShards)); assertThat(forgetFollowerResponse.getFailedShards(), equalTo(0)); From 0157ebf947df9f87e8b28378054da00d511fd11f Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Wed, 10 Apr 2019 08:33:34 +0300 Subject: [PATCH 23/60] Increase the interval filtering for CURRENT_DATE/TODAY tests (#40999) (cherry picked from commit a5911330aecee90da5401e468b9976f66a2556f0) --- .../sql/qa/src/main/resources/date.csv-spec | 24 +++++++++---------- .../qa/src/main/resources/docs/docs.csv-spec | 14 +++++------ 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec index f417c15d8f0..cd4f7f02f07 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec @@ -35,21 +35,21 @@ SELECT TRUNCATE(YEAR(TODAY() - 
INTERVAL 50 YEARS) / 1000) AS result; ; -currentDateFilter-Ignore -SELECT first_name FROM test_emp WHERE hire_date > CURRENT_DATE() - INTERVAL 25 YEARS ORDER BY first_name ASC LIMIT 10; +currentDateFilter +SELECT first_name FROM test_emp WHERE hire_date > CURRENT_DATE() - INTERVAL 35 YEARS ORDER BY first_name ASC LIMIT 10; first_name ----------------- -Kazuhito -Kenroku -Lillian -Mayumi -Mingsen -Sailaja -Saniya -Shahaf -Suzette -Tuval +Alejandro +Amabile +Anneke +Anoosh +Arumugam +Basil +Berhard +Berni +Bezalel +Bojan ; currentDateFilterScript diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index 0389fdc43f3..d7a60200a5e 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -2400,17 +2400,17 @@ SELECT TODAY() AS result; // end::todayFunction ; -filterToday-Ignore +filterToday // tag::filterToday -SELECT first_name FROM emp WHERE hire_date > TODAY() - INTERVAL 25 YEARS ORDER BY first_name ASC LIMIT 5; +SELECT first_name FROM emp WHERE hire_date > TODAY() - INTERVAL 35 YEARS ORDER BY first_name ASC LIMIT 5; first_name ------------ -Kazuhito -Kenroku -Lillian -Mayumi -Mingsen +Alejandro +Amabile +Anneke +Anoosh +Arumugam // end::filterToday ; From 46b0fdae33868bbeab142b1dc8d12402ba3308fd Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 10 Apr 2019 07:59:17 +0200 Subject: [PATCH 24/60] Add realistic hlrc request serialization test base class and (#40362) changed hlrc ccr request tests to use AbstractRequestTestCase base class. This way the request classes are tested in a more realistic setting. Note this change also adds a test dependency on xpack core module. Similar to #39844 but then for hlrc request serialization tests. Removed iterators from hlrc parsing tests. Use empty xcontent registries. Relates to #39745 --- .../client/AbstractRequestTestCase.java | 65 ++++++++++++++ .../client/AbstractResponseTestCase.java | 25 +++--- .../ccr/PutAutoFollowPatternRequestTests.java | 77 +++++------------ .../client/ccr/PutFollowRequestTests.java | 85 +++++++------------ .../client/ccr/ResumeFollowRequestTests.java | 66 ++++---------- 5 files changed, 147 insertions(+), 171 deletions(-) create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java new file mode 100644 index 00000000000..8a10831c477 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+/**
+ * Base class for HLRC request parsing tests.
+ *
+ * This case class facilitates generating client side request test instances and
+ * verifies that they are correctly parsed into server side request instances.
+ *
+ * @param <C> The class representing the request on the client side.
+ * @param <S> The class representing the request on the server side.
+ */
+public abstract class AbstractRequestTestCase<C extends ToXContent, S> extends ESTestCase {
+
+ public final void testFromXContent() throws IOException {
+ final C clientTestInstance = createClientTestInstance();
+
+ final XContentType xContentType = randomFrom(XContentType.values());
+ final BytesReference bytes = toShuffledXContent(clientTestInstance, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
+
+ final XContent xContent = XContentFactory.xContent(xContentType);
+ final XContentParser parser = xContent.createParser(
+ NamedXContentRegistry.EMPTY,
+ LoggingDeprecationHandler.INSTANCE,
+ bytes.streamInput());
+ final S serverInstance = doParseToServerInstance(parser);
+ assertInstances(serverInstance, clientTestInstance);
+ }
+
+ protected abstract C createClientTestInstance();
+
+ protected abstract S doParseToServerInstance(XContentParser parser) throws IOException;
+
+ protected abstract void assertInstances(S serverInstance, C clientTestInstance);
+
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java
index ebdbfc05035..8565ca14a90 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java
@@ -18,7 +18,6 @@
 */
 package org.elasticsearch.client;

-import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@@ -42,23 +41,19 @@ import java.io.IOException;
 */
 public abstract class AbstractResponseTestCase<S extends ToXContent, C> extends ESTestCase {

- private static final int NUMBER_OF_TEST_RUNS = 20;
-
 public final void testFromXContent() throws IOException {
- for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) {
- final S serverTestInstance = createServerTestInstance();
+ final S serverTestInstance = createServerTestInstance();

- final XContentType xContentType = randomFrom(XContentType.values());
- final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
+ final XContentType xContentType = randomFrom(XContentType.values());
+ final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());

- final XContent 
xContent = XContentFactory.xContent(xContentType); - final XContentParser parser = xContent.createParser( - new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), - LoggingDeprecationHandler.INSTANCE, - bytes.streamInput()); - final C clientInstance = doParseToClientInstance(parser); - assertInstances(serverTestInstance, clientInstance); - } + final XContent xContent = XContentFactory.xContent(xContentType); + final XContentParser parser = xContent.createParser( + NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, + bytes.streamInput()); + final C clientInstance = doParseToClientInstance(parser); + assertInstances(serverTestInstance, clientInstance); } protected abstract S createServerTestInstance(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutAutoFollowPatternRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutAutoFollowPatternRequestTests.java index 429cc4a9f90..b9ee34c8bda 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutAutoFollowPatternRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutAutoFollowPatternRequestTests.java @@ -19,71 +19,24 @@ package org.elasticsearch.client.ccr; +import org.elasticsearch.client.AbstractRequestTestCase; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; import java.io.IOException; import java.util.Arrays; -import java.util.List; -public class PutAutoFollowPatternRequestTests extends AbstractXContentTestCase { +import static org.elasticsearch.client.ccr.PutFollowRequestTests.assertFollowConfig; +import static org.hamcrest.Matchers.equalTo; - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("test_parser", - true, (args) -> new PutAutoFollowPatternRequest("name", (String) args[0], (List) args[1])); - - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), PutFollowRequest.REMOTE_CLUSTER_FIELD); - PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD); - PARSER.declareString(PutAutoFollowPatternRequest::setFollowIndexNamePattern, PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD); - PARSER.declareInt(PutAutoFollowPatternRequest::setMaxReadRequestOperationCount, FollowConfig.MAX_READ_REQUEST_OPERATION_COUNT); - PARSER.declareField( - PutAutoFollowPatternRequest::setMaxReadRequestSize, - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_READ_REQUEST_SIZE.getPreferredName()), - PutFollowRequest.MAX_READ_REQUEST_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareInt(PutAutoFollowPatternRequest::setMaxOutstandingReadRequests, FollowConfig.MAX_OUTSTANDING_READ_REQUESTS); - PARSER.declareInt(PutAutoFollowPatternRequest::setMaxWriteRequestOperationCount, FollowConfig.MAX_WRITE_REQUEST_OPERATION_COUNT); - PARSER.declareField( - PutAutoFollowPatternRequest::setMaxWriteRequestSize, - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_REQUEST_SIZE.getPreferredName()), - PutFollowRequest.MAX_WRITE_REQUEST_SIZE, - 
-            ObjectParser.ValueType.STRING);
-        PARSER.declareInt(PutAutoFollowPatternRequest::setMaxOutstandingWriteRequests, FollowConfig.MAX_OUTSTANDING_WRITE_REQUESTS);
-        PARSER.declareInt(PutAutoFollowPatternRequest::setMaxWriteBufferCount, FollowConfig.MAX_WRITE_BUFFER_COUNT);
-        PARSER.declareField(
-            PutAutoFollowPatternRequest::setMaxWriteBufferSize,
-            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_BUFFER_SIZE.getPreferredName()),
-            PutFollowRequest.MAX_WRITE_BUFFER_SIZE,
-            ObjectParser.ValueType.STRING);
-        PARSER.declareField(
-            PutAutoFollowPatternRequest::setMaxRetryDelay,
-            (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.MAX_RETRY_DELAY_FIELD.getPreferredName()),
-            PutFollowRequest.MAX_RETRY_DELAY_FIELD,
-            ObjectParser.ValueType.STRING);
-        PARSER.declareField(
-            PutAutoFollowPatternRequest::setReadPollTimeout,
-            (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.READ_POLL_TIMEOUT.getPreferredName()),
-            PutFollowRequest.READ_POLL_TIMEOUT,
-            ObjectParser.ValueType.STRING);
-    }
+public class PutAutoFollowPatternRequestTests extends AbstractRequestTestCase<
+    PutAutoFollowPatternRequest,
+    PutAutoFollowPatternAction.Request> {
 
     @Override
-    protected PutAutoFollowPatternRequest doParseInstance(XContentParser parser) throws IOException {
-        return PARSER.apply(parser, null);
-    }
-
-    @Override
-    protected boolean supportsUnknownFields() {
-        return true;
-    }
-
-    @Override
-    protected PutAutoFollowPatternRequest createTestInstance() {
+    protected PutAutoFollowPatternRequest createClientTestInstance() {
         // Name isn't serialized, because it is specified in the url path, so no need to randomly generate it here.
         PutAutoFollowPatternRequest putAutoFollowPatternRequest = new PutAutoFollowPatternRequest("name",
             randomAlphaOfLength(4), Arrays.asList(generateRandomStringArray(4, 4, false)));
@@ -123,4 +76,18 @@ public class PutAutoFollowPatternRequestTests extends AbstractXContentTestCase<PutAutoFollowPatternRequest> {

+import static org.hamcrest.Matchers.equalTo;
 
-    private static final ConstructingObjectParser<PutFollowRequest, Void> PARSER = new ConstructingObjectParser<>("test_parser",
-        true, (args) -> new PutFollowRequest((String) args[0], (String) args[1], "followerIndex"));
-
-    static {
-        PARSER.declareString(ConstructingObjectParser.constructorArg(), PutFollowRequest.REMOTE_CLUSTER_FIELD);
-        PARSER.declareString(ConstructingObjectParser.constructorArg(), PutFollowRequest.LEADER_INDEX_FIELD);
-        PARSER.declareInt(PutFollowRequest::setMaxReadRequestOperationCount, PutFollowRequest.MAX_READ_REQUEST_OPERATION_COUNT);
-        PARSER.declareField(
-            PutFollowRequest::setMaxReadRequestSize,
-            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), PutFollowRequest.MAX_READ_REQUEST_SIZE.getPreferredName()),
-            PutFollowRequest.MAX_READ_REQUEST_SIZE,
-            ObjectParser.ValueType.STRING);
-        PARSER.declareInt(PutFollowRequest::setMaxOutstandingReadRequests, PutFollowRequest.MAX_OUTSTANDING_READ_REQUESTS);
-        PARSER.declareInt(PutFollowRequest::setMaxWriteRequestOperationCount, PutFollowRequest.MAX_WRITE_REQUEST_OPERATION_COUNT);
-        PARSER.declareField(
-            PutFollowRequest::setMaxWriteRequestSize,
-            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), PutFollowRequest.MAX_WRITE_REQUEST_SIZE.getPreferredName()),
-            PutFollowRequest.MAX_WRITE_REQUEST_SIZE,
-            ObjectParser.ValueType.STRING);
-        PARSER.declareInt(PutFollowRequest::setMaxOutstandingWriteRequests, PutFollowRequest.MAX_OUTSTANDING_WRITE_REQUESTS);
-        PARSER.declareInt(PutFollowRequest::setMaxWriteBufferCount, PutFollowRequest.MAX_WRITE_BUFFER_COUNT);
-        PARSER.declareField(
-            PutFollowRequest::setMaxWriteBufferSize,
-            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), PutFollowRequest.MAX_WRITE_BUFFER_SIZE.getPreferredName()),
-            PutFollowRequest.MAX_WRITE_BUFFER_SIZE,
-            ObjectParser.ValueType.STRING);
-        PARSER.declareField(
-            PutFollowRequest::setMaxRetryDelay,
-            (p, c) -> TimeValue.parseTimeValue(p.text(), PutFollowRequest.MAX_RETRY_DELAY_FIELD.getPreferredName()),
-            PutFollowRequest.MAX_RETRY_DELAY_FIELD,
-            ObjectParser.ValueType.STRING);
-        PARSER.declareField(
-            PutFollowRequest::setReadPollTimeout,
-            (p, c) -> TimeValue.parseTimeValue(p.text(), PutFollowRequest.READ_POLL_TIMEOUT.getPreferredName()),
-            PutFollowRequest.READ_POLL_TIMEOUT,
-            ObjectParser.ValueType.STRING);
-    }
+public class PutFollowRequestTests extends AbstractRequestTestCase<PutFollowRequest, PutFollowAction.Request> {
 
     @Override
-    protected PutFollowRequest doParseInstance(XContentParser parser) throws IOException {
-        return PARSER.apply(parser, null);
-    }
-
-    @Override
-    protected boolean supportsUnknownFields() {
-        return false;
-    }
-
-    @Override
-    protected PutFollowRequest createTestInstance() {
+    protected PutFollowRequest createClientTestInstance() {
         PutFollowRequest putFollowRequest = new PutFollowRequest(randomAlphaOfLength(4), randomAlphaOfLength(4), "followerIndex");
 
         if (randomBoolean()) {
@@ -115,4 +70,30 @@ public class PutFollowRequestTests extends AbstractXContentTestCase<PutFollowRequest> {

+import static org.elasticsearch.client.ccr.PutFollowRequestTests.assertFollowConfig;
+import static org.hamcrest.Matchers.equalTo;
 
-    private static final ObjectParser<ResumeFollowRequest, Void> PARSER = new ObjectParser<>("test_parser",
-        true, () -> new ResumeFollowRequest("followerIndex"));
-
-    static {
-        PARSER.declareInt(ResumeFollowRequest::setMaxReadRequestOperationCount, FollowConfig.MAX_READ_REQUEST_OPERATION_COUNT);
-        PARSER.declareField(
-            ResumeFollowRequest::setMaxReadRequestSize,
-            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_READ_REQUEST_SIZE.getPreferredName()),
-            PutFollowRequest.MAX_READ_REQUEST_SIZE,
-            ObjectParser.ValueType.STRING);
-        PARSER.declareInt(ResumeFollowRequest::setMaxOutstandingReadRequests, FollowConfig.MAX_OUTSTANDING_READ_REQUESTS);
-        PARSER.declareInt(ResumeFollowRequest::setMaxWriteRequestOperationCount, FollowConfig.MAX_WRITE_REQUEST_OPERATION_COUNT);
-        PARSER.declareField(
-            ResumeFollowRequest::setMaxWriteRequestSize,
-            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_REQUEST_SIZE.getPreferredName()),
-            PutFollowRequest.MAX_WRITE_REQUEST_SIZE,
-            ObjectParser.ValueType.STRING);
-        PARSER.declareInt(ResumeFollowRequest::setMaxOutstandingWriteRequests, FollowConfig.MAX_OUTSTANDING_WRITE_REQUESTS);
-        PARSER.declareInt(ResumeFollowRequest::setMaxWriteBufferCount, FollowConfig.MAX_WRITE_BUFFER_COUNT);
-        PARSER.declareField(
-            ResumeFollowRequest::setMaxWriteBufferSize,
-            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_BUFFER_SIZE.getPreferredName()),
-            PutFollowRequest.MAX_WRITE_BUFFER_SIZE,
-            ObjectParser.ValueType.STRING);
-        PARSER.declareField(
-            ResumeFollowRequest::setMaxRetryDelay,
-            (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.MAX_RETRY_DELAY_FIELD.getPreferredName()),
-            PutFollowRequest.MAX_RETRY_DELAY_FIELD,
-            ObjectParser.ValueType.STRING);
-        PARSER.declareField(
-            ResumeFollowRequest::setReadPollTimeout,
-            (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.READ_POLL_TIMEOUT.getPreferredName()),
-            PutFollowRequest.READ_POLL_TIMEOUT,
-            ObjectParser.ValueType.STRING);
-    }
+public class ResumeFollowRequestTests extends AbstractRequestTestCase<ResumeFollowRequest, ResumeFollowAction.Request> {
 
     @Override
-    protected ResumeFollowRequest doParseInstance(XContentParser parser) throws IOException {
-        return PARSER.apply(parser, null);
-    }
-
-    @Override
-    protected boolean supportsUnknownFields() {
-        return true;
-    }
-
-    @Override
-    protected ResumeFollowRequest createTestInstance() {
+    protected ResumeFollowRequest createClientTestInstance() {
         ResumeFollowRequest resumeFollowRequest = new ResumeFollowRequest("followerIndex");
         if (randomBoolean()) {
             resumeFollowRequest.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE));
@@ -111,4 +68,15 @@ public class ResumeFollowRequestTests extends AbstractXContentTestCase<ResumeFollowRequest>

Date: Wed, 10 Apr 2019 08:00:58 +0200
Subject: [PATCH 25/60] [ML-DataFrame] create checkpoints on every new run (#40725)

Use the checkpoint service to create a checkpoint on every new run. Expose
checkpoint stats on the _stats endpoint.
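To make the new checkpointing output concrete: the following standalone driver is an illustrative sketch, not part of this patch. The CheckpointingInfoExample class name and the timestamp values are invented; only the two x-pack core classes and their constructors come from the change below. It prints the "checkpointing" object that the _stats endpoint now embeds for each transform.

import org.elasticsearch.common.Strings;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo;

public class CheckpointingInfoExample {
    public static void main(String[] args) {
        // Stats of the last completed checkpoint; a zero time upper bound means the
        // checkpoint was not restricted to a time window, so that field is omitted.
        DataFrameTransformCheckpointStats current = new DataFrameTransformCheckpointStats(1554800000000L, 0L);
        // Stats of the checkpoint currently being built (checkpoint + 1 while the indexer runs).
        DataFrameTransformCheckpointStats inProgress = new DataFrameTransformCheckpointStats(1554800060000L, 0L);
        // 42 source operations are not yet reflected in the destination index.
        DataFrameTransformCheckpointingInfo info = new DataFrameTransformCheckpointingInfo(current, inProgress, 42L);
        // Prints: {"current":{"timestamp_millis":1554800000000},
        //          "in_progress":{"timestamp_millis":1554800060000},"operations_behind":42}
        System.out.println(Strings.toString(info));
    }
}

Because toXContent suppresses checkpoint stats whose timestamp is zero, a transform that has never completed a checkpoint reports only operations_behind.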
--- .../DataFrameTransformCheckpointStats.java | 93 +++++++ .../DataFrameTransformCheckpointingInfo.java | 102 +++++++ .../transforms/DataFrameTransformState.java | 18 +- .../DataFrameTransformStateAndStats.java | 20 +- ...ataFrameTransformCheckpointStatsTests.java | 51 ++++ ...aFrameTransformCheckpointingInfoTests.java | 61 +++++ .../DataFrameTransformStateAndStatsTests.java | 5 +- .../DataFrameTransformStateTests.java | 2 +- .../xpack/core/dataframe/DataFrameField.java | 11 + .../DataFrameTransformCheckpointStats.java | 108 ++++++++ .../DataFrameTransformCheckpointingInfo.java | 141 ++++++++++ .../transforms/DataFrameTransformState.java | 33 ++- .../DataFrameTransformStateAndStats.java | 30 +- ...ataFrameTransformCheckpointStatsTests.java | 35 +++ ...aFrameTransformCheckpointingInfoTests.java | 35 +++ .../DataFrameTransformStateAndStatsTests.java | 3 +- ...rameTransformCheckpointStatsHlrcTests.java | 52 ++++ ...meTransformCheckpointingInfoHlrcTests.java | 55 ++++ ...aFrameTransformStateAndStatsHlrcTests.java | 3 +- .../DataFrameTransformStateHlrcTests.java | 2 +- .../integration/DataFrameRestTestCase.java | 12 +- .../DataFrameTaskFailedStateIT.java | 2 +- .../xpack/dataframe/DataFrame.java | 2 +- ...portGetDataFrameTransformsStatsAction.java | 28 +- .../DataFrameTransformsCheckpointService.java | 109 +++++++- .../DataFrameTransformsConfigManager.java | 3 +- .../transforms/DataFrameIndexer.java | 13 +- .../DataFrameTransformCheckpoint.java | 108 ++++++-- .../transforms/DataFrameTransformTask.java | 71 +++-- .../DataFrameSingleNodeTestCase.java | 4 +- ...meTransformCheckpointServiceNodeTests.java | 258 ++++++++++++++++++ ...DataFrameTransformsConfigManagerTests.java | 5 +- .../DataFrameTransformCheckpointTests.java | 116 +++++--- .../test/data_frame/transforms_stats.yml | 4 +- 34 files changed, 1448 insertions(+), 147 deletions(-) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStats.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfo.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStats.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfo.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsHlrcTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoHlrcTests.java rename x-pack/plugin/{core/src/main/java/org/elasticsearch/xpack/core => data-frame/src/main/java/org/elasticsearch/xpack}/dataframe/transforms/DataFrameTransformCheckpoint.java (74%) rename 
x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/{persistence => }/DataFrameSingleNodeTestCase.java (95%) create mode 100644 x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java rename x-pack/plugin/{core/src/test/java/org/elasticsearch/xpack/core => data-frame/src/test/java/org/elasticsearch/xpack}/dataframe/transforms/DataFrameTransformCheckpointTests.java (50%) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStats.java new file mode 100644 index 00000000000..723a38d61ea --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStats.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class DataFrameTransformCheckpointStats { + public static final ParseField TIMESTAMP_MILLIS = new ParseField("timestamp_millis"); + public static final ParseField TIME_UPPER_BOUND_MILLIS = new ParseField("time_upper_bound_millis"); + + public static DataFrameTransformCheckpointStats EMPTY = new DataFrameTransformCheckpointStats(0L, 0L); + + private final long timestampMillis; + private final long timeUpperBoundMillis; + + public static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( + "data_frame_transform_checkpoint_stats", true, args -> { + long timestamp = args[0] == null ? 0L : (Long) args[0]; + long timeUpperBound = args[1] == null ? 
0L : (Long) args[1]; + + return new DataFrameTransformCheckpointStats(timestamp, timeUpperBound); + }); + + static { + LENIENT_PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), TIMESTAMP_MILLIS); + LENIENT_PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), TIME_UPPER_BOUND_MILLIS); + } + + public static DataFrameTransformCheckpointStats fromXContent(XContentParser parser) throws IOException { + return LENIENT_PARSER.parse(parser, null); + } + + public DataFrameTransformCheckpointStats(final long timestampMillis, final long timeUpperBoundMillis) { + this.timestampMillis = timestampMillis; + this.timeUpperBoundMillis = timeUpperBoundMillis; + } + + public DataFrameTransformCheckpointStats(StreamInput in) throws IOException { + this.timestampMillis = in.readLong(); + this.timeUpperBoundMillis = in.readLong(); + } + + public long getTimestampMillis() { + return timestampMillis; + } + + public long getTimeUpperBoundMillis() { + return timeUpperBoundMillis; + } + + @Override + public int hashCode() { + return Objects.hash(timestampMillis, timeUpperBoundMillis); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DataFrameTransformCheckpointStats that = (DataFrameTransformCheckpointStats) other; + + return this.timestampMillis == that.timestampMillis && this.timeUpperBoundMillis == that.timeUpperBoundMillis; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfo.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfo.java new file mode 100644 index 00000000000..d00cf173e9e --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfo.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Objects; + +public class DataFrameTransformCheckpointingInfo { + + public static final ParseField CURRENT_CHECKPOINT = new ParseField("current"); + public static final ParseField IN_PROGRESS_CHECKPOINT = new ParseField("in_progress"); + public static final ParseField OPERATIONS_BEHIND = new ParseField("operations_behind"); + + private final DataFrameTransformCheckpointStats current; + private final DataFrameTransformCheckpointStats inProgress; + private final long operationsBehind; + + + private static final ConstructingObjectParser LENIENT_PARSER = + new ConstructingObjectParser<>( + "data_frame_transform_checkpointing_info", true, a -> { + long behind = a[2] == null ? 0L : (Long) a[2]; + + return new DataFrameTransformCheckpointingInfo( + a[0] == null ? DataFrameTransformCheckpointStats.EMPTY : (DataFrameTransformCheckpointStats) a[0], + a[1] == null ? DataFrameTransformCheckpointStats.EMPTY : (DataFrameTransformCheckpointStats) a[1], behind); + }); + + static { + LENIENT_PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataFrameTransformCheckpointStats.fromXContent(p), CURRENT_CHECKPOINT); + LENIENT_PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataFrameTransformCheckpointStats.fromXContent(p), IN_PROGRESS_CHECKPOINT); + LENIENT_PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), OPERATIONS_BEHIND); + } + + public DataFrameTransformCheckpointingInfo(DataFrameTransformCheckpointStats current, DataFrameTransformCheckpointStats inProgress, + long operationsBehind) { + this.current = Objects.requireNonNull(current); + this.inProgress = Objects.requireNonNull(inProgress); + this.operationsBehind = operationsBehind; + } + + public DataFrameTransformCheckpointStats getCurrent() { + return current; + } + + public DataFrameTransformCheckpointStats getInProgress() { + return inProgress; + } + + public long getOperationsBehind() { + return operationsBehind; + } + + public static DataFrameTransformCheckpointingInfo fromXContent(XContentParser p) { + return LENIENT_PARSER.apply(p, null); + } + + @Override + public int hashCode() { + return Objects.hash(current, inProgress, operationsBehind); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DataFrameTransformCheckpointingInfo that = (DataFrameTransformCheckpointingInfo) other; + + return Objects.equals(this.current, that.current) && + Objects.equals(this.inProgress, that.inProgress) && + this.operationsBehind == that.operationsBehind; + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java index 248ee9a18f5..6bbc7a00b1b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java @@ -42,7 +42,7 @@ public class DataFrameTransformState { private static final ParseField INDEXER_STATE = new 
ParseField("indexer_state"); private static final ParseField TASK_STATE = new ParseField("task_state"); private static final ParseField CURRENT_POSITION = new ParseField("current_position"); - private static final ParseField GENERATION = new ParseField("generation"); + private static final ParseField CHECKPOINT = new ParseField("checkpoint"); private static final ParseField REASON = new ParseField("reason"); @SuppressWarnings("unchecked") @@ -69,7 +69,7 @@ public class DataFrameTransformState { } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, CURRENT_POSITION, ObjectParser.ValueType.VALUE_OBJECT_ARRAY); - PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), GENERATION); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CHECKPOINT); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REASON); } @@ -79,19 +79,19 @@ public class DataFrameTransformState { private final DataFrameTransformTaskState taskState; private final IndexerState indexerState; - private final long generation; + private final long checkpoint; private final SortedMap currentPosition; private final String reason; public DataFrameTransformState(DataFrameTransformTaskState taskState, IndexerState indexerState, @Nullable Map position, - long generation, + long checkpoint, @Nullable String reason) { this.taskState = taskState; this.indexerState = indexerState; this.currentPosition = position == null ? null : Collections.unmodifiableSortedMap(new TreeMap<>(position)); - this.generation = generation; + this.checkpoint = checkpoint; this.reason = reason; } @@ -108,8 +108,8 @@ public class DataFrameTransformState { return currentPosition; } - public long getGeneration() { - return generation; + public long getCheckpoint() { + return checkpoint; } @Nullable @@ -132,13 +132,13 @@ public class DataFrameTransformState { return Objects.equals(this.taskState, that.taskState) && Objects.equals(this.indexerState, that.indexerState) && Objects.equals(this.currentPosition, that.currentPosition) && - this.generation == that.generation && + this.checkpoint == that.checkpoint && Objects.equals(this.reason, that.reason); } @Override public int hashCode() { - return Objects.hash(taskState, indexerState, currentPosition, generation, reason); + return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStats.java index 9c45bfb1325..938563796ca 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStats.java @@ -31,16 +31,20 @@ public class DataFrameTransformStateAndStats { public static final ParseField ID = new ParseField("id"); public static final ParseField STATE_FIELD = new ParseField("state"); public static final ParseField STATS_FIELD = new ParseField("stats"); + public static final ParseField CHECKPOINTING_INFO_FIELD = new ParseField("checkpointing"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_frame_transform_state_and_stats", true, - a -> new DataFrameTransformStateAndStats((String) a[0], (DataFrameTransformState) a[1], 
(DataFrameIndexerTransformStats) a[2])); + a -> new DataFrameTransformStateAndStats((String) a[0], (DataFrameTransformState) a[1], (DataFrameIndexerTransformStats) a[2], + (DataFrameTransformCheckpointingInfo) a[3])); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), ID); PARSER.declareObject(ConstructingObjectParser.constructorArg(), DataFrameTransformState.PARSER::apply, STATE_FIELD); PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> DataFrameIndexerTransformStats.fromXContent(p), STATS_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataFrameTransformCheckpointingInfo.fromXContent(p), CHECKPOINTING_INFO_FIELD); } public static DataFrameTransformStateAndStats fromXContent(XContentParser parser) throws IOException { @@ -50,11 +54,14 @@ public class DataFrameTransformStateAndStats { private final String id; private final DataFrameTransformState transformState; private final DataFrameIndexerTransformStats transformStats; + private final DataFrameTransformCheckpointingInfo checkpointingInfo; - public DataFrameTransformStateAndStats(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats) { + public DataFrameTransformStateAndStats(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats, + DataFrameTransformCheckpointingInfo checkpointingInfo) { this.id = id; this.transformState = state; this.transformStats = stats; + this.checkpointingInfo = checkpointingInfo; } public String getId() { @@ -69,9 +76,13 @@ public class DataFrameTransformStateAndStats { return transformState; } + public DataFrameTransformCheckpointingInfo getCheckpointingInfo() { + return checkpointingInfo; + } + @Override public int hashCode() { - return Objects.hash(id, transformState, transformStats); + return Objects.hash(id, transformState, transformStats, checkpointingInfo); } @Override @@ -87,6 +98,7 @@ public class DataFrameTransformStateAndStats { DataFrameTransformStateAndStats that = (DataFrameTransformStateAndStats) other; return Objects.equals(this.id, that.id) && Objects.equals(this.transformState, that.transformState) - && Objects.equals(this.transformStats, that.transformStats); + && Objects.equals(this.transformStats, that.transformStats) + && Objects.equals(this.checkpointingInfo, that.checkpointingInfo); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java new file mode 100644 index 00000000000..23019a0e58f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class DataFrameTransformCheckpointStatsTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + DataFrameTransformCheckpointStatsTests::randomDataFrameTransformCheckpointStats, + DataFrameTransformCheckpointStatsTests::toXContent, + DataFrameTransformCheckpointStats::fromXContent) + .supportsUnknownFields(true) + .test(); + } + + public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() { + return new DataFrameTransformCheckpointStats(randomLongBetween(1, 1_000_000), randomLongBetween(0, 1_000_000)); + } + + public static void toXContent(DataFrameTransformCheckpointStats stats, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field("timestamp_millis", stats.getTimestampMillis()); + builder.field("time_upper_bound_millis", stats.getTimeUpperBoundMillis()); + builder.endObject(); + } + +} \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java new file mode 100644 index 00000000000..8f12d90a1c4 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class DataFrameTransformCheckpointingInfoTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + DataFrameTransformCheckpointingInfoTests::randomDataFrameTransformCheckpointingInfo, + DataFrameTransformCheckpointingInfoTests::toXContent, + DataFrameTransformCheckpointingInfo::fromXContent) + .supportsUnknownFields(false) + .test(); + } + + public static DataFrameTransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() { + return new DataFrameTransformCheckpointingInfo( + DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(), + DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(), + randomLongBetween(0, 10000)); + } + + public static void toXContent(DataFrameTransformCheckpointingInfo info, XContentBuilder builder) throws IOException { + builder.startObject(); + if (info.getCurrent().getTimestampMillis() > 0) { + builder.field("current"); + DataFrameTransformCheckpointStatsTests.toXContent(info.getCurrent(), builder); + } + if (info.getInProgress().getTimestampMillis() > 0) { + builder.field("in_progress"); + DataFrameTransformCheckpointStatsTests.toXContent(info.getInProgress(), builder); + } + builder.field("operations_behind", info.getOperationsBehind()); + builder.endObject(); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStatsTests.java index 5c75120f86e..88628699104 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStatsTests.java @@ -41,7 +41,8 @@ public class DataFrameTransformStateAndStatsTests extends ESTestCase { public static DataFrameTransformStateAndStats randomInstance() { return new DataFrameTransformStateAndStats(randomAlphaOfLength(10), DataFrameTransformStateTests.randomDataFrameTransformState(), - DataFrameIndexerTransformStatsTests.randomStats()); + DataFrameIndexerTransformStatsTests.randomStats(), + DataFrameTransformCheckpointingInfoTests.randomDataFrameTransformCheckpointingInfo()); } public static void toXContent(DataFrameTransformStateAndStats stateAndStats, XContentBuilder builder) throws IOException { @@ -51,6 +52,8 @@ public class DataFrameTransformStateAndStatsTests extends ESTestCase { DataFrameTransformStateTests.toXContent(stateAndStats.getTransformState(), builder); builder.field(DataFrameTransformStateAndStats.STATS_FIELD.getPreferredName()); DataFrameIndexerTransformStatsTests.toXContent(stateAndStats.getTransformStats(), builder); + builder.field(DataFrameTransformStateAndStats.CHECKPOINTING_INFO_FIELD.getPreferredName()); + DataFrameTransformCheckpointingInfoTests.toXContent(stateAndStats.getCheckpointingInfo(), builder); builder.endObject(); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java
index fa1ac4202f9..7d1d713a127 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java
@@ -56,7 +56,7 @@ public class DataFrameTransformStateTests extends ESTestCase {
         if (state.getPosition() != null) {
             builder.field("current_position", state.getPosition());
         }
-        builder.field("generation", state.getGeneration());
+        builder.field("checkpoint", state.getCheckpoint());
         if (state.getReason() != null) {
             builder.field("reason", state.getReason());
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java
index 154bd206ade..71bf14cdeb4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java
@@ -28,6 +28,17 @@ public final class DataFrameField {
     public static final ParseField DESTINATION = new ParseField("dest");
     public static final ParseField FORCE = new ParseField("force");
 
+    /**
+     * Fields for checkpointing
+     */
+    // the timestamp of the checkpoint, mandatory
+    public static final ParseField TIMESTAMP_MILLIS = new ParseField("timestamp_millis");
+    public static final ParseField TIMESTAMP = new ParseField("timestamp");
+    // checkpoint for time based sync
+    // TODO: consider a lower bound for use cases where you want to transform on a window of a stream
+    public static final ParseField TIME_UPPER_BOUND_MILLIS = new ParseField("time_upper_bound_millis");
+    public static final ParseField TIME_UPPER_BOUND = new ParseField("time_upper_bound");
+
     // common strings
     public static final String TASK_NAME = "data_frame/transforms";
     public static final String REST_BASE_PATH = "/_data_frame/";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStats.java
new file mode 100644
index 00000000000..4dd4b68905f
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStats.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; + +import java.io.IOException; +import java.util.Objects; + +/** + * Checkpoint stats data for 1 checkpoint + * + * This is the user-facing side of DataFrameTransformCheckpoint, containing only the stats to be exposed. + */ +public class DataFrameTransformCheckpointStats implements Writeable, ToXContentObject { + + public static DataFrameTransformCheckpointStats EMPTY = new DataFrameTransformCheckpointStats(0L, 0L); + + private final long timestampMillis; + private final long timeUpperBoundMillis; + + private static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( + "data_frame_transform_checkpoint_stats", true, args -> { + long timestamp = args[0] == null ? 0L : (Long) args[0]; + long timeUpperBound = args[1] == null ? 0L : (Long) args[1]; + + return new DataFrameTransformCheckpointStats(timestamp, timeUpperBound); + }); + + static { + LENIENT_PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), DataFrameField.TIMESTAMP_MILLIS); + LENIENT_PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), DataFrameField.TIME_UPPER_BOUND_MILLIS); + } + + public DataFrameTransformCheckpointStats(final long timestampMillis, final long timeUpperBoundMillis) { + this.timestampMillis = timestampMillis; + this.timeUpperBoundMillis = timeUpperBoundMillis; + } + + public DataFrameTransformCheckpointStats(StreamInput in) throws IOException { + this.timestampMillis = in.readLong(); + this.timeUpperBoundMillis = in.readLong(); + } + + public long getTimestampMillis() { + return timestampMillis; + } + + public long getTimeUpperBoundMillis() { + return timeUpperBoundMillis; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.timeField(DataFrameField.TIMESTAMP_MILLIS.getPreferredName(), DataFrameField.TIMESTAMP.getPreferredName(), + getTimestampMillis()); + if (timeUpperBoundMillis > 0) { + builder.timeField(DataFrameField.TIME_UPPER_BOUND_MILLIS.getPreferredName(), DataFrameField.TIME_UPPER_BOUND.getPreferredName(), + timeUpperBoundMillis); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(timestampMillis); + out.writeLong(timeUpperBoundMillis); + } + + @Override + public int hashCode() { + return Objects.hash(timestampMillis, timeUpperBoundMillis); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DataFrameTransformCheckpointStats that = (DataFrameTransformCheckpointStats) other; + + return this.timestampMillis == that.timestampMillis && + this.timeUpperBoundMillis == that.timeUpperBoundMillis; + } + + public static DataFrameTransformCheckpointStats fromXContent(XContentParser p) { + return LENIENT_PARSER.apply(p, null); + } + +} \ No newline at end of file diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfo.java new file mode 100644 index 00000000000..1b66be41add --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfo.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Holds information about checkpointing regarding + * - the current checkpoint + * - the in progress checkpoint + * - the current state of the source + */ +public class DataFrameTransformCheckpointingInfo implements Writeable, ToXContentObject { + + public static DataFrameTransformCheckpointingInfo EMPTY = new DataFrameTransformCheckpointingInfo( + DataFrameTransformCheckpointStats.EMPTY, + DataFrameTransformCheckpointStats.EMPTY, + 0L); + + public static final ParseField CURRENT_CHECKPOINT = new ParseField("current"); + public static final ParseField IN_PROGRESS_CHECKPOINT = new ParseField("in_progress"); + public static final ParseField OPERATIONS_BEHIND = new ParseField("operations_behind"); + + private final DataFrameTransformCheckpointStats current; + private final DataFrameTransformCheckpointStats inProgress; + private final long operationsBehind; + + private static final ConstructingObjectParser LENIENT_PARSER = + new ConstructingObjectParser<>( + "data_frame_transform_checkpointing_info", true, a -> { + long behind = a[2] == null ? 0L : (Long) a[2]; + + return new DataFrameTransformCheckpointingInfo( + a[0] == null ? DataFrameTransformCheckpointStats.EMPTY : (DataFrameTransformCheckpointStats) a[0], + a[1] == null ? DataFrameTransformCheckpointStats.EMPTY : (DataFrameTransformCheckpointStats) a[1], behind); + }); + + static { + LENIENT_PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataFrameTransformCheckpointStats.fromXContent(p), CURRENT_CHECKPOINT); + LENIENT_PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataFrameTransformCheckpointStats.fromXContent(p), IN_PROGRESS_CHECKPOINT); + LENIENT_PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), OPERATIONS_BEHIND); + } + + /** + * Create checkpoint stats object with checkpoint information about the current and in progress checkpoint as well as the current state + * of source. 
+ * + * @param current stats of the current checkpoint + * @param inProgress stats of the in progress checkpoint + * @param operationsBehind counter of operations the current checkpoint is behind source + */ + public DataFrameTransformCheckpointingInfo(DataFrameTransformCheckpointStats current, DataFrameTransformCheckpointStats inProgress, + long operationsBehind) { + this.current = Objects.requireNonNull(current); + this.inProgress = Objects.requireNonNull(inProgress); + this.operationsBehind = operationsBehind; + } + + public DataFrameTransformCheckpointingInfo(StreamInput in) throws IOException { + current = new DataFrameTransformCheckpointStats(in); + inProgress = new DataFrameTransformCheckpointStats(in); + operationsBehind = in.readLong(); + } + + public DataFrameTransformCheckpointStats getCurrent() { + return current; + } + + public DataFrameTransformCheckpointStats getInProgress() { + return inProgress; + } + + public long getOperationsBehind() { + return operationsBehind; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (current.getTimestampMillis() > 0) { + builder.field(CURRENT_CHECKPOINT.getPreferredName(), current); + } + if (inProgress.getTimestampMillis() > 0) { + builder.field(IN_PROGRESS_CHECKPOINT.getPreferredName(), inProgress); + } + + builder.field(OPERATIONS_BEHIND.getPreferredName(), operationsBehind); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + current.writeTo(out); + inProgress.writeTo(out); + out.writeLong(operationsBehind); + } + + public static DataFrameTransformCheckpointingInfo fromXContent(XContentParser p) { + return LENIENT_PARSER.apply(p, null); + } + + @Override + public int hashCode() { + return Objects.hash(current, inProgress, operationsBehind); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DataFrameTransformCheckpointingInfo that = (DataFrameTransformCheckpointingInfo) other; + + return Objects.equals(this.current, that.current) && + Objects.equals(this.inProgress, that.inProgress) && + this.operationsBehind == that.operationsBehind; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java index 4b90407ee1a..76d68cf178c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java @@ -35,7 +35,7 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState private final DataFrameTransformTaskState taskState; private final IndexerState indexerState; - private final long generation; + private final long checkpoint; @Nullable private final SortedMap currentPosition; @@ -45,7 +45,7 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState private static final ParseField TASK_STATE = new ParseField("task_state"); private static final ParseField INDEXER_STATE = new ParseField("indexer_state"); private static final ParseField CURRENT_POSITION = new ParseField("current_position"); - private static final ParseField GENERATION = new 
ParseField("generation"); + private static final ParseField CHECKPOINT = new ParseField("checkpoint"); private static final ParseField REASON = new ParseField("reason"); @SuppressWarnings("unchecked") @@ -80,19 +80,19 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, CURRENT_POSITION, ObjectParser.ValueType.VALUE_OBJECT_ARRAY); - PARSER.declareLong(constructorArg(), GENERATION); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CHECKPOINT); PARSER.declareString(optionalConstructorArg(), REASON); } public DataFrameTransformState(DataFrameTransformTaskState taskState, IndexerState indexerState, @Nullable Map position, - long generation, + long checkpoint, @Nullable String reason) { this.taskState = taskState; this.indexerState = indexerState; this.currentPosition = position == null ? null : Collections.unmodifiableSortedMap(new TreeMap<>(position)); - this.generation = generation; + this.checkpoint = checkpoint; this.reason = reason; } @@ -100,7 +100,7 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState taskState = DataFrameTransformTaskState.fromStream(in); indexerState = IndexerState.fromStream(in); currentPosition = in.readBoolean() ? Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap())) : null; - generation = in.readLong(); + checkpoint = in.readLong(); reason = in.readOptionalString(); } @@ -116,8 +116,17 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState return currentPosition; } - public long getGeneration() { - return generation; + public long getCheckpoint() { + return checkpoint; + } + + /** + * Get the in-progress checkpoint + * + * @return checkpoint in progress or 0 if task/indexer is not active + */ + public long getInProgressCheckpoint() { + return indexerState.equals(IndexerState.INDEXING) ? 
checkpoint + 1L : 0; } public String getReason() { @@ -140,7 +149,7 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState if (currentPosition != null) { builder.field(CURRENT_POSITION.getPreferredName(), currentPosition); } - builder.field(GENERATION.getPreferredName(), generation); + builder.field(CHECKPOINT.getPreferredName(), checkpoint); if (reason != null) { builder.field(REASON.getPreferredName(), reason); } @@ -161,7 +170,7 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState if (currentPosition != null) { out.writeMap(currentPosition); } - out.writeLong(generation); + out.writeLong(checkpoint); out.writeOptionalString(reason); } @@ -180,13 +189,13 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState return Objects.equals(this.taskState, that.taskState) && Objects.equals(this.indexerState, that.indexerState) && Objects.equals(this.currentPosition, that.currentPosition) && - this.generation == that.generation && + this.checkpoint == that.checkpoint && Objects.equals(this.reason, that.reason); } @Override public int hashCode() { - return Objects.hash(taskState, indexerState, currentPosition, generation, reason); + return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java index 8cb1ebd3144..57171fca43f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java @@ -24,20 +24,27 @@ public class DataFrameTransformStateAndStats implements Writeable, ToXContentObj private static final String NAME = "data_frame_transform_state_and_stats"; public static final ParseField STATE_FIELD = new ParseField("state"); + public static final ParseField CHECKPOINTING_INFO_FIELD = new ParseField("checkpointing"); private final String id; private final DataFrameTransformState transformState; private final DataFrameIndexerTransformStats transformStats; + private final DataFrameTransformCheckpointingInfo checkpointingInfo; public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( NAME, true, - a -> new DataFrameTransformStateAndStats((String) a[0], (DataFrameTransformState) a[1], (DataFrameIndexerTransformStats) a[2])); + a -> new DataFrameTransformStateAndStats((String) a[0], + (DataFrameTransformState) a[1], + (DataFrameIndexerTransformStats) a[2], + (DataFrameTransformCheckpointingInfo) a[3])); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), DataFrameField.ID); PARSER.declareObject(ConstructingObjectParser.constructorArg(), DataFrameTransformState.PARSER::apply, STATE_FIELD); PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> DataFrameIndexerTransformStats.fromXContent(p), DataFrameField.STATS_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataFrameTransformCheckpointingInfo.fromXContent(p), CHECKPOINTING_INFO_FIELD); } public static DataFrameTransformStateAndStats initialStateAndStats(String id) { @@ -47,27 +54,32 @@ public class DataFrameTransformStateAndStats implements Writeable, ToXContentObj public static 
DataFrameTransformStateAndStats initialStateAndStats(String id, DataFrameIndexerTransformStats indexerTransformStats) { return new DataFrameTransformStateAndStats(id, new DataFrameTransformState(DataFrameTransformTaskState.STOPPED, IndexerState.STOPPED, null, 0L, null), - indexerTransformStats); + indexerTransformStats, + DataFrameTransformCheckpointingInfo.EMPTY); } - public DataFrameTransformStateAndStats(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats) { + public DataFrameTransformStateAndStats(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats, + DataFrameTransformCheckpointingInfo checkpointingInfo) { this.id = Objects.requireNonNull(id); this.transformState = Objects.requireNonNull(state); this.transformStats = Objects.requireNonNull(stats); + this.checkpointingInfo = Objects.requireNonNull(checkpointingInfo); } public DataFrameTransformStateAndStats(StreamInput in) throws IOException { this.id = in.readString(); this.transformState = new DataFrameTransformState(in); this.transformStats = new DataFrameIndexerTransformStats(in); + this.checkpointingInfo = new DataFrameTransformCheckpointingInfo(in); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(DataFrameField.ID.getPreferredName(), id); - builder.field(STATE_FIELD.getPreferredName(), transformState); + builder.field(STATE_FIELD.getPreferredName(), transformState, params); builder.field(DataFrameField.STATS_FIELD.getPreferredName(), transformStats, params); + builder.field(CHECKPOINTING_INFO_FIELD.getPreferredName(), checkpointingInfo, params); builder.endObject(); return builder; } @@ -77,11 +89,12 @@ public class DataFrameTransformStateAndStats implements Writeable, ToXContentObj out.writeString(id); transformState.writeTo(out); transformStats.writeTo(out); + checkpointingInfo.writeTo(out); } @Override public int hashCode() { - return Objects.hash(id, transformState, transformStats); + return Objects.hash(id, transformState, transformStats, checkpointingInfo); } @Override @@ -97,7 +110,8 @@ public class DataFrameTransformStateAndStats implements Writeable, ToXContentObj DataFrameTransformStateAndStats that = (DataFrameTransformStateAndStats) other; return Objects.equals(this.id, that.id) && Objects.equals(this.transformState, that.transformState) - && Objects.equals(this.transformStats, that.transformStats); + && Objects.equals(this.transformStats, that.transformStats) + && Objects.equals(this.checkpointingInfo, that.checkpointingInfo); } public String getId() { @@ -112,6 +126,10 @@ public class DataFrameTransformStateAndStats implements Writeable, ToXContentObj return transformState; } + public DataFrameTransformCheckpointingInfo getCheckpointingInfo() { + return checkpointingInfo; + } + @Override public String toString() { return Strings.toString(this); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java new file mode 100644 index 00000000000..88eab817d06 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.dataframe.transforms;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public class DataFrameTransformCheckpointStatsTests extends AbstractSerializingDataFrameTestCase<DataFrameTransformCheckpointStats>
+{
+    public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() {
+        return new DataFrameTransformCheckpointStats(randomNonNegativeLong(), randomNonNegativeLong());
+    }
+
+    @Override
+    protected DataFrameTransformCheckpointStats doParseInstance(XContentParser parser) throws IOException {
+        return DataFrameTransformCheckpointStats.fromXContent(parser);
+    }
+
+    @Override
+    protected DataFrameTransformCheckpointStats createTestInstance() {
+        return randomDataFrameTransformCheckpointStats();
+    }
+
+    @Override
+    protected Reader<DataFrameTransformCheckpointStats> instanceReader() {
+        return DataFrameTransformCheckpointStats::new;
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java
new file mode 100644
index 00000000000..b1c28073c75
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class DataFrameTransformCheckpointingInfoTests extends AbstractSerializingDataFrameTestCase<DataFrameTransformCheckpointingInfo> { + + public static DataFrameTransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() { + return new DataFrameTransformCheckpointingInfo(DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(), + DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(), randomNonNegativeLong()); + } + + @Override + protected DataFrameTransformCheckpointingInfo doParseInstance(XContentParser parser) throws IOException { + return DataFrameTransformCheckpointingInfo.fromXContent(parser); + } + + @Override + protected DataFrameTransformCheckpointingInfo createTestInstance() { + return randomDataFrameTransformCheckpointingInfo(); + } + + @Override + protected Reader<DataFrameTransformCheckpointingInfo> instanceReader() { + return DataFrameTransformCheckpointingInfo::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStatsTests.java index 4f80d0d0b45..89ce1e6d840 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStatsTests.java @@ -22,7 +22,8 @@ public class DataFrameTransformStateAndStatsTests extends AbstractSerializingDat public static DataFrameTransformStateAndStats randomDataFrameTransformStateAndStats(String id) { return new DataFrameTransformStateAndStats(id, DataFrameTransformStateTests.randomDataFrameTransformState(), - DataFrameIndexerTransformStatsTests.randomStats(id)); + DataFrameIndexerTransformStatsTests.randomStats(id), + DataFrameTransformCheckpointingInfoTests.randomDataFrameTransformCheckpointingInfo()); } public static DataFrameTransformStateAndStats randomDataFrameTransformStateAndStats() {
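The XContent side of these round trips follows the same shape; a sketch, assuming the standard XContent helpers (XContentFactory, JsonXContent, NamedXContentRegistry, DeprecationHandler) and that the rendered JSON is parsed straight back:

DataFrameTransformCheckpointingInfo info = DataFrameTransformCheckpointingInfoTests.randomDataFrameTransformCheckpointingInfo();
XContentBuilder builder = XContentFactory.jsonBuilder();
info.toXContent(builder, ToXContent.EMPTY_PARAMS);        // render to JSON
String json = Strings.toString(builder);
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    assertEquals(info, DataFrameTransformCheckpointingInfo.fromXContent(parser));  // parse back
}

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsHlrcTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsHlrcTests.java new file mode 100644 index 00000000000..d29cfa2784f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsHlrcTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 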
+ */ + +package org.elasticsearch.xpack.core.dataframe.transforms.hlrc; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStatsTests; + +import java.io.IOException; + +public class DataFrameTransformCheckpointStatsHlrcTests extends AbstractHlrcXContentTestCase< + DataFrameTransformCheckpointStats, + org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats> { + + public static DataFrameTransformCheckpointStats fromHlrc( + org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats instance) { + return new DataFrameTransformCheckpointStats(instance.getTimestampMillis(), instance.getTimeUpperBoundMillis()); + } + + @Override + public org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats doHlrcParseInstance(XContentParser parser) + throws IOException { + return org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats.fromXContent(parser); + } + + @Override + public DataFrameTransformCheckpointStats convertHlrcToInternal( + org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats instance) { + return fromHlrc(instance); + } + + @Override + protected DataFrameTransformCheckpointStats createTestInstance() { + return DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(); + } + + @Override + protected DataFrameTransformCheckpointStats doParseInstance(XContentParser parser) throws IOException { + return DataFrameTransformCheckpointStats.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoHlrcTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoHlrcTests.java new file mode 100644 index 00000000000..10f0bf5f2b7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoHlrcTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.dataframe.transforms.hlrc; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfoTests; + +import java.io.IOException; + +public class DataFrameTransformCheckpointingInfoHlrcTests extends AbstractHlrcXContentTestCase< + DataFrameTransformCheckpointingInfo, + org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo> { + + public static DataFrameTransformCheckpointingInfo fromHlrc( + org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo instance) { + return new DataFrameTransformCheckpointingInfo( + DataFrameTransformCheckpointStatsHlrcTests.fromHlrc(instance.getCurrent()), + DataFrameTransformCheckpointStatsHlrcTests.fromHlrc(instance.getInProgress()), + instance.getOperationsBehind()); + } + + @Override + public org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo doHlrcParseInstance(XContentParser parser) + throws IOException { + return org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo.fromXContent(parser); + } + + @Override + public DataFrameTransformCheckpointingInfo convertHlrcToInternal( + org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo instance) { + return fromHlrc(instance); + } + + @Override + protected DataFrameTransformCheckpointingInfo createTestInstance() { + return DataFrameTransformCheckpointingInfoTests.randomDataFrameTransformCheckpointingInfo(); + } + + @Override + protected DataFrameTransformCheckpointingInfo doParseInstance(XContentParser parser) throws IOException { + return DataFrameTransformCheckpointingInfo.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsHlrcTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsHlrcTests.java index fe63c56941f..6ef473cfb95 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsHlrcTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsHlrcTests.java @@ -29,7 +29,8 @@ public class DataFrameTransformStateAndStatsHlrcTests extends AbstractHlrcXConte org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats instance) { return new DataFrameTransformStateAndStats(instance.getId(), DataFrameTransformStateHlrcTests.fromHlrc(instance.getTransformState()), - DataFrameIndexerTransformStatsHlrcTests.fromHlrc(instance.getTransformStats())); + DataFrameIndexerTransformStatsHlrcTests.fromHlrc(instance.getTransformStats()), + DataFrameTransformCheckpointingInfoHlrcTests.fromHlrc(instance.getCheckpointingInfo())); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateHlrcTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateHlrcTests.java index c6e4acd5c82..5b2fa6ec33d 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateHlrcTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateHlrcTests.java @@ -21,7 +21,7 @@ public class DataFrameTransformStateHlrcTests extends AbstractHlrcXContentTestCa public static DataFrameTransformState fromHlrc(org.elasticsearch.client.dataframe.transforms.DataFrameTransformState instance) { return new DataFrameTransformState(DataFrameTransformTaskState.fromString(instance.getTaskState().value()), - IndexerState.fromString(instance.getIndexerState().value()), instance.getPosition(), instance.getGeneration(), + IndexerState.fromString(instance.getIndexerState().value()), instance.getPosition(), instance.getCheckpoint(), instance.getReason()); } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index 6b9300916a9..89bdafbde2b 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -196,7 +196,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { // start the transform startDataframeTransform(transformId, false, authHeader); // wait until the dataframe has been created and all data is available - waitForDataFrameGeneration(transformId); + waitForDataFrameCheckpoint(transformId); refreshIndex(dataFrameIndex); } @@ -212,10 +212,10 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { return request; } - void waitForDataFrameGeneration(String transformId) throws Exception { + void waitForDataFrameCheckpoint(String transformId) throws Exception { assertBusy(() -> { - long generation = getDataFrameGeneration(transformId); - assertEquals(1, generation); + long checkpoint = getDataFrameCheckpoint(transformId); + assertEquals(1, checkpoint); }, 30, TimeUnit.SECONDS); } @@ -321,11 +321,11 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { } } - static int getDataFrameGeneration(String transformId) throws IOException { + static int getDataFrameCheckpoint(String transformId) throws IOException { Response statsResponse = client().performRequest(new Request("GET", DATAFRAME_ENDPOINT + transformId + "/_stats")); Map transformStatsAsMap = (Map) ((List) entityAsMap(statsResponse).get("transforms")).get(0); - return (int) XContentMapValues.extractValue("state.generation", transformStatsAsMap); + return (int) XContentMapValues.extractValue("state.checkpoint", transformStatsAsMap); } protected void setupDataAccessRole(String role, String... 
indices) throws IOException { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index de0757a30ba..96aeeda8755 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -60,7 +60,7 @@ public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { // Force start the data frame to indicate failure correction startDataframeTransform(transformId, true); // Wait for data to be indexed appropriately and refresh for search - waitForDataFrameGeneration(transformId); + waitForDataFrameCheckpoint(transformId); refreshIndex(dataFrameIndex); // Verify that we have started and that our reason is cleared diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java index a32c01c43a0..4f689c6a5af 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java @@ -190,7 +190,7 @@ public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlu DATA_FRAME_ORIGIN, DataFrameAuditMessage.builder())); dataFrameTransformsConfigManager.set(new DataFrameTransformsConfigManager(client, xContentRegistry)); - dataFrameTransformsCheckpointService.set(new DataFrameTransformsCheckpointService(client)); + dataFrameTransformsCheckpointService.set(new DataFrameTransformsCheckpointService(client, dataFrameTransformsConfigManager.get())); return Arrays.asList(dataFrameTransformsConfigManager.get(), dataFrameAuditor.get(), dataFrameTransformsCheckpointService.get()); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java index eeabbe0b5eb..b350300392c 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java @@ -45,7 +45,9 @@ import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStats import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Request; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Response; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import 
org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; @@ -71,14 +73,18 @@ public class TransportGetDataFrameTransformsStatsAction extends private final Client client; private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; + private final DataFrameTransformsCheckpointService transformsCheckpointService; + @Inject public TransportGetDataFrameTransformsStatsAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService, Client client, - DataFrameTransformsConfigManager dataFrameTransformsConfigManager) { + DataFrameTransformsConfigManager dataFrameTransformsConfigManager, + DataFrameTransformsCheckpointService transformsCheckpointService) { super(GetDataFrameTransformsStatsAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, Response::new, ThreadPool.Names.SAME); this.client = client; this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; + this.transformsCheckpointService = transformsCheckpointService; } @Override @@ -93,16 +99,22 @@ public class TransportGetDataFrameTransformsStatsAction extends @Override protected void taskOperation(Request request, DataFrameTransformTask task, ActionListener listener) { - List transformsStateAndStats = Collections.emptyList(); - // Little extra insurance, make sure we only return transforms that aren't cancelled if (task.isCancelled() == false) { - DataFrameTransformStateAndStats transformStateAndStats = new DataFrameTransformStateAndStats(task.getTransformId(), - task.getState(), task.getStats()); - transformsStateAndStats = Collections.singletonList(transformStateAndStats); + transformsCheckpointService.getCheckpointStats(task.getTransformId(), task.getCheckpoint(), task.getInProgressCheckpoint(), + ActionListener.wrap(checkpointStats -> { + listener.onResponse(new Response(Collections.singletonList( + new DataFrameTransformStateAndStats(task.getTransformId(), task.getState(), task.getStats(), checkpointStats)))); + }, e -> { + listener.onResponse(new Response( + Collections.singletonList(new DataFrameTransformStateAndStats(task.getTransformId(), task.getState(), + task.getStats(), DataFrameTransformCheckpointingInfo.EMPTY)), + Collections.emptyList(), + Collections.singletonList(new ElasticsearchException("Failed to retrieve checkpointing info", e)))); + })); + } else { + listener.onResponse(new Response(Collections.emptyList())); } - - listener.onResponse(new Response(transformsStateAndStats)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java index 5d65016a890..5008934128e 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java @@ -9,6 +9,7 @@ package org.elasticsearch.xpack.dataframe.checkpoint; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.indices.get.GetIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; 
@@ -16,14 +17,19 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.client.Client; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformCheckpoint; import java.util.Arrays; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; /** * DataFrameTransform Checkpoint Service @@ -35,12 +41,24 @@ import java.util.TreeMap; */ public class DataFrameTransformsCheckpointService { + private class Checkpoints { + DataFrameTransformCheckpoint currentCheckpoint = DataFrameTransformCheckpoint.EMPTY; + DataFrameTransformCheckpoint inProgressCheckpoint = DataFrameTransformCheckpoint.EMPTY; + DataFrameTransformCheckpoint sourceCheckpoint = DataFrameTransformCheckpoint.EMPTY; + } + private static final Logger logger = LogManager.getLogger(DataFrameTransformsCheckpointService.class); - private final Client client; + // timeout for retrieving checkpoint information + private static final int CHECKPOINT_STATS_TIMEOUT_SECONDS = 5; - public DataFrameTransformsCheckpointService(final Client client) { + private final Client client; + private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; + + public DataFrameTransformsCheckpointService(final Client client, + final DataFrameTransformsConfigManager dataFrameTransformsConfigManager) { this.client = client; + this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; } /** @@ -84,6 +102,7 @@ public class DataFrameTransformsCheckpointService { Map<String, long[]> checkpointsByIndex = extractIndexCheckPoints(response.getShards(), userIndices); DataFrameTransformCheckpoint checkpointDoc = new DataFrameTransformCheckpoint(transformConfig.getId(), timestamp, checkpoint, checkpointsByIndex, timeUpperBound); + listener.onResponse(checkpointDoc); }, IndicesStatsRequestException -> { @@ -96,6 +115,90 @@ public class DataFrameTransformsCheckpointService { } + /** + * Get checkpointing stats for a data frame + * + * Implementation details: + * - fires up to 3 requests _in parallel_ rather than cascading them + * + * @param transformId the id of the data frame transform + * @param currentCheckpoint the current checkpoint + * @param inProgressCheckpoint the in-progress checkpoint + * @param listener listener to retrieve the result + */ + public void getCheckpointStats( + String transformId, + long currentCheckpoint, + long inProgressCheckpoint, + ActionListener<DataFrameTransformCheckpointingInfo> listener) { + + // process in parallel: current checkpoint, in-progress checkpoint, current state of the source + CountDownLatch latch = new CountDownLatch(3); + + // ensure listener is called exactly once + final ActionListener<DataFrameTransformCheckpointingInfo> wrappedListener = ActionListener.notifyOnce(listener); + + // holder structure for writing the results of the 3 parallel tasks + Checkpoints checkpoints = new Checkpoints(); + + // get the current checkpoint + if (currentCheckpoint != 0) {
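+ // checkpoint 0 means that no checkpoint has been created yet, so there is nothing to
+ // look up; in that case the else branch below simply counts the latch down immediately
+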
dataFrameTransformsConfigManager.getTransformCheckpoint(transformId, currentCheckpoint, + new LatchedActionListener<>(ActionListener.wrap(checkpoint -> checkpoints.currentCheckpoint = checkpoint, e -> { + logger.debug("Failed to retrieve checkpoint [" + currentCheckpoint + "] for data frame [" + transformId + "]", e); + wrappedListener + .onFailure(new CheckpointException("Failed to retrieve current checkpoint [" + currentCheckpoint + "]", e)); + }), latch)); + } else { + latch.countDown(); + } + + // get the in-progress checkpoint + if (inProgressCheckpoint != 0) { + dataFrameTransformsConfigManager.getTransformCheckpoint(transformId, inProgressCheckpoint, + new LatchedActionListener<>(ActionListener.wrap(checkpoint -> checkpoints.inProgressCheckpoint = checkpoint, e -> { + logger.debug("Failed to retrieve in-progress checkpoint [" + inProgressCheckpoint + "] for data frame [" + + transformId + "]", e); + wrappedListener.onFailure( + new CheckpointException("Failed to retrieve in-progress checkpoint [" + inProgressCheckpoint + "]", e)); + }), latch)); + } else { + latch.countDown(); + } + + // get the current state of the source (requires the transform configuration) + dataFrameTransformsConfigManager.getTransformConfiguration(transformId, ActionListener.wrap(transformConfig -> { + getCheckpoint(transformConfig, + new LatchedActionListener<>(ActionListener.wrap(checkpoint -> checkpoints.sourceCheckpoint = checkpoint, e2 -> { + logger.debug("Failed to retrieve actual checkpoint for data frame [" + transformId + "]", e2); + wrappedListener.onFailure(new CheckpointException("Failed to retrieve actual checkpoint", e2)); + }), latch)); + }, e -> { + logger.warn("Failed to retrieve configuration for data frame [" + transformId + "]", e); + wrappedListener.onFailure(new CheckpointException("Failed to retrieve configuration", e)); + latch.countDown(); + })); + + try { + if (latch.await(CHECKPOINT_STATS_TIMEOUT_SECONDS, TimeUnit.SECONDS)) { + logger.debug("Retrieval of checkpoint information succeeded for data frame [" + transformId + "]"); + wrappedListener.onResponse(new DataFrameTransformCheckpointingInfo( + new DataFrameTransformCheckpointStats(checkpoints.currentCheckpoint.getTimestamp(), + checkpoints.currentCheckpoint.getTimeUpperBound()), + new DataFrameTransformCheckpointStats(checkpoints.inProgressCheckpoint.getTimestamp(), + checkpoints.inProgressCheckpoint.getTimeUpperBound()), + DataFrameTransformCheckpoint.getBehind(checkpoints.currentCheckpoint, checkpoints.sourceCheckpoint))); + } else { + // timed out + logger.debug("Retrieval of checkpoint information has timed out for data frame [" + transformId + "]"); + wrappedListener.onFailure(new CheckpointException("Retrieval of checkpoint information has timed out")); + } + } catch (InterruptedException e) { + logger.debug("Failed to retrieve checkpoints for data frame [" + transformId + "]", e); + wrappedListener.onFailure(new CheckpointException("Failure during checkpoint info retrieval", e)); + } + } + static Map<String, long[]> extractIndexCheckPoints(ShardStats[] shards, Set<String> userIndices) { Map<String, TreeMap<Integer, Long>> checkpointsByIndex = new TreeMap<>();
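The fan-out above leans on LatchedActionListener: it delegates to the wrapped listener and counts the latch down afterwards, whichever callback fires, which is what lets getCheckpointStats wait for all three branches with a single bounded await. A minimal sketch of that behaviour (listener body and values illustrative):

CountDownLatch latch = new CountDownLatch(1);
ActionListener<String> latched = new LatchedActionListener<>(
    ActionListener.wrap(
        response -> logger.debug("got [" + response + "]"),  // success path
        e -> logger.debug("failed", e)),                     // failure path
    latch);
latched.onResponse("done");        // the delegate runs first, then the latch is counted down
latch.await(5, TimeUnit.SECONDS);  // returns true immediately once all branches have completed

diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java index a19a0b65d85..f8b8bc6dc83 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java +++ 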
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java @@ -45,8 +45,8 @@ import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformCheckpoint; import java.io.IOException; import java.io.InputStream; @@ -148,6 +148,7 @@ public class DataFrameTransformsConfigManager { if (getResponse.isExists() == false) { // do not fail if checkpoint does not exist but return an empty checkpoint + logger.trace("found no checkpoint for transform [" + transformId + "], returning empty checkpoint"); resultListener.onResponse(DataFrameTransformCheckpoint.EMPTY); return; } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java index 238e531bba9..c781d05f189 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java @@ -51,12 +51,23 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer getFieldMappings(); + /** + * Request a checkpoint + */ + protected abstract void createCheckpoint(ActionListener listener); + @Override protected void onStart(long now, ActionListener listener) { try { QueryBuilder queryBuilder = getConfig().getSource().getQueryConfig().getQuery(); pivot = new Pivot(getConfig().getSource().getIndex(), queryBuilder, getConfig().getPivotConfig()); - listener.onResponse(null); + + // if run for the 1st time, create checkpoint + if (getPosition() == null) { + createCheckpoint(listener); + } else { + listener.onResponse(null); + } } catch (Exception e) { listener.onFailure(e); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpoint.java similarity index 74% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java rename to x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpoint.java index f14415489f1..f5d2dbad693 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpoint.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.dataframe.transforms; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; @@ -35,7 +35,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona * The fields: * * timestamp the timestamp when this document has been created - * checkpoint the checkpoint number, incremented for every checkpoint + * checkpoint the checkpoint number, incremented for every checkpoint; -1 marks a transient (non-persisted) checkpoint * indices a map of the indices from the source including all checkpoints of all indices matching the source pattern, shard level * time_upper_bound for time-based indices this holds the upper time boundary of this checkpoint * @@ -44,21 +44,12 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject public static DataFrameTransformCheckpoint EMPTY = new DataFrameTransformCheckpoint("empty", 0L, -1L, Collections.emptyMap(), 0L); - // the timestamp of the checkpoint, mandatory - public static final ParseField TIMESTAMP_MILLIS = new ParseField("timestamp_millis"); - public static final ParseField TIMESTAMP = new ParseField("timestamp"); - // the own checkpoint public static final ParseField CHECKPOINT = new ParseField("checkpoint"); // checkpoint of the indexes (sequence id's) public static final ParseField INDICES = new ParseField("indices"); - // checkpoint for for time based sync - // TODO: consider a lower bound for usecases where you want to transform on a window of a stream - public static final ParseField TIME_UPPER_BOUND_MILLIS = new ParseField("time_upper_bound_millis"); - public static final ParseField TIME_UPPER_BOUND = new ParseField("time_upper_bound"); - private static final String NAME = "data_frame_transform_checkpoint"; private static final ConstructingObjectParser<DataFrameTransformCheckpoint, Void> STRICT_PARSER = createParser(false); @@ -89,7 +80,7 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject parser.declareString(constructorArg(), DataFrameField.ID); // note: this is never parsed from the outside where timestamp can be formatted as date time - parser.declareLong(constructorArg(), TIMESTAMP_MILLIS); + parser.declareLong(constructorArg(), DataFrameField.TIMESTAMP_MILLIS); parser.declareLong(constructorArg(), CHECKPOINT); parser.declareObject(constructorArg(), (p,c) -> { @@ -111,7 +102,7 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject } return checkPointsByIndexName; }, INDICES); - parser.declareLong(optionalConstructorArg(), TIME_UPPER_BOUND_MILLIS); + parser.declareLong(optionalConstructorArg(), DataFrameField.TIME_UPPER_BOUND_MILLIS); parser.declareString(optionalConstructorArg(), DataFrameField.INDEX_DOC_TYPE); return parser; @@ -134,29 +125,47 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject this.timeUpperBoundMillis = in.readLong(); } + public boolean isEmpty() { + return indicesCheckpoints.isEmpty(); + } + + /** + * Whether this checkpoint is a transient (non-persisted) checkpoint + * + * @return true if this is a transient checkpoint, false otherwise + */ + public boolean isTransient() { + return checkpoint == -1; + } + + /** + * Create XContent for the purpose of storing it in the internal index + * + * Note: this format is only used for internal storage, so the id and doc_type are always written + * + * @param builder the {@link XContentBuilder} + * @param params builder specific parameters + * + * @return builder instance + */ @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - // the id, doc_type and checkpoint is only internally used for storage, the user-facing version gets embedded - if (params.paramAsBoolean(DataFrameField.FOR_INTERNAL_STORAGE, false)) { - builder.field(DataFrameField.ID.getPreferredName(), transformId); - builder.field(CHECKPOINT.getPreferredName(), checkpoint); - builder.field(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), NAME); - } - - builder.timeField(TIMESTAMP_MILLIS.getPreferredName(), TIMESTAMP.getPreferredName(), timestampMillis); - - if (timeUpperBoundMillis > 0) { - builder.timeField(TIME_UPPER_BOUND_MILLIS.getPreferredName(), TIME_UPPER_BOUND.getPreferredName(), timeUpperBoundMillis); - } - + builder.field(DataFrameField.ID.getPreferredName(), transformId); + builder.field(CHECKPOINT.getPreferredName(), checkpoint); + builder.field(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), NAME); builder.startObject(INDICES.getPreferredName()); for (Entry<String, long[]> entry : indicesCheckpoints.entrySet()) { builder.array(entry.getKey(), entry.getValue()); } builder.endObject(); + builder.field(DataFrameField.TIMESTAMP_MILLIS.getPreferredName(), timestampMillis); + + if (timeUpperBoundMillis > 0) { + builder.field(DataFrameField.TIME_UPPER_BOUND_MILLIS.getPreferredName(), timeUpperBoundMillis); + } + builder.endObject(); return builder; } @@ -249,6 +258,53 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject return NAME + "-" + transformId + "-" + checkpoint; } + /** + * Calculate the difference between two checkpoints + * + * This provides an indicator of how far apart the two checkpoints are. + * + * Note: the argument order matters + * + * @param oldCheckpoint the older checkpoint; if it is transient, the newer one must be transient, too + * @param newCheckpoint the newer checkpoint, can be a transient checkpoint + * + * @return the number of operations the older checkpoint is behind the newer one, or -1L if the difference could not be calculated + */ + public static long getBehind(DataFrameTransformCheckpoint oldCheckpoint, DataFrameTransformCheckpoint newCheckpoint) { + if (oldCheckpoint.isTransient()) { + if (newCheckpoint.isTransient() == false) { + throw new IllegalArgumentException("cannot compare a transient against a non-transient checkpoint"); + } // else: both are transient + } else if (newCheckpoint.isTransient() == false && oldCheckpoint.getCheckpoint() > newCheckpoint.getCheckpoint()) { + throw new IllegalArgumentException("old checkpoint is newer than new checkpoint"); + } + + // all old indices must be contained in the new ones but not vice versa + if (newCheckpoint.indicesCheckpoints.keySet().containsAll(oldCheckpoint.indicesCheckpoints.keySet()) == false) { + return -1L; + } + + // get the sum of the shard checkpoints + // note: we require shard checkpoints to strictly increase and never decrease + long oldCheckPointSum = 0; + long newCheckPointSum = 0; + + for (long[] v : oldCheckpoint.indicesCheckpoints.values()) { + oldCheckPointSum += Arrays.stream(v).sum(); + } + + for (long[] v : newCheckpoint.indicesCheckpoints.values()) { + newCheckPointSum += Arrays.stream(v).sum(); + } + + // this should not be possible + if (newCheckPointSum < oldCheckPointSum) { + return -1L; + } + + return newCheckPointSum - oldCheckPointSum; + } + private static Map<String, long[]> readCheckpoints(Map<String, Object> readMap) { Map<String, long[]> checkpoints = new TreeMap<>(); for (Map.Entry<String, Object> e : readMap.entrySet()) {
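To make getBehind concrete, here is a worked example using the same shard numbers as the checkpoint service node test further below (the transform id, index name and timestamps are illustrative):

// old (current) checkpoint: three shards at [10, 10, 10] -> sum 30
Map<String, long[]> oldByIndex = Collections.singletonMap("my-index", new long[] { 10, 10, 10 });
DataFrameTransformCheckpoint old = new DataFrameTransformCheckpoint("my-transform", 1000L, 1L, oldByIndex, null);

// the source as of now, captured as a transient (checkpoint == -1) snapshot: [10, 50, 33] -> sum 93
Map<String, long[]> sourceByIndex = Collections.singletonMap("my-index", new long[] { 10, 50, 33 });
DataFrameTransformCheckpoint source = new DataFrameTransformCheckpoint("my-transform", 2000L, -1L, sourceByIndex, null);

// behind = 93 - 30 = 63 operations
assertEquals(63L, DataFrameTransformCheckpoint.getBehind(old, source));

diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java 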
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index a4f3df8cbe5..69ceb32dfc7 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -48,6 +48,7 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -68,10 +69,9 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S private final AtomicReference taskState; private final AtomicReference stateReason; - // the generation of this data frame, for v1 there will be only - // 0: data frame not created or still indexing - // 1: data frame complete, all data has been indexed - private final AtomicReference generation; + // the checkpoint of this data frame, storing the checkpoint until data indexing from source to dest is _complete_ + // Note: Each indexer run creates a new future checkpoint which becomes the current checkpoint only after the indexer run finished + private final AtomicLong currentCheckpoint; private final AtomicInteger failureCount; public DataFrameTransformTask(long id, String type, String action, TaskId parentTask, DataFrameTransform transform, @@ -105,12 +105,12 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S initialState = existingState; } initialPosition = state.getPosition(); - initialGeneration = state.getGeneration(); + initialGeneration = state.getCheckpoint(); } this.indexer = new ClientDataFrameIndexer(transform.getId(), transformsConfigManager, transformsCheckpointService, new AtomicReference<>(initialState), initialPosition, client, auditor); - this.generation = new AtomicReference<>(initialGeneration); + this.currentCheckpoint = new AtomicLong(initialGeneration); this.previousStats = new DataFrameIndexerTransformStats(transform.getId()); this.taskState = new AtomicReference<>(initialTaskState); this.stateReason = new AtomicReference<>(initialReason); @@ -130,7 +130,12 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } public DataFrameTransformState getState() { - return new DataFrameTransformState(taskState.get(), indexer.getState(), indexer.getPosition(), generation.get(), stateReason.get()); + return new DataFrameTransformState( + taskState.get(), + indexer.getState(), + indexer.getPosition(), + currentCheckpoint.get(), + stateReason.get()); } void initializePreviousStats(DataFrameIndexerTransformStats stats) { @@ -141,8 +146,17 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S return new DataFrameIndexerTransformStats(previousStats).merge(indexer.getStats()); } - public long getGeneration() { - return generation.get(); + public long getCheckpoint() { + return currentCheckpoint.get(); + } + + /** + * Get the in-progress checkpoint + * + * @return checkpoint in progress or 0 if task/indexer is not active + */ + public long getInProgressCheckpoint() { + return indexer.getState().equals(IndexerState.INDEXING) ? 
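// while a run is in flight the indexer works towards checkpoint current + 1; outside of a run there is no in-progress checkpoint, which is signalled as 0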
currentCheckpoint.get() + 1L : 0; } public boolean isStopped() { @@ -164,7 +178,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S DataFrameTransformTaskState.STARTED, IndexerState.STOPPED, indexer.getPosition(), - generation.get(), + currentCheckpoint.get(), null); logger.info("Updating state for data frame transform [{}] to [{}]", transform.getId(), state.toString()); @@ -203,7 +217,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S DataFrameTransformTaskState.STOPPED, IndexerState.STOPPED, indexer.getPosition(), - generation.get(), + currentCheckpoint.get(), stateReason.get()); persistStateToClusterState(state, ActionListener.wrap( task -> { @@ -224,7 +238,8 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S @Override public synchronized void triggered(Event event) { - if (generation.get() == 0 && event.getJobName().equals(SCHEDULE_NAME + "_" + transform.getId())) { + // for now no rerun, so only trigger if checkpoint == 0 + if (currentCheckpoint.get() == 0 && event.getJobName().equals(SCHEDULE_NAME + "_" + transform.getId())) { logger.debug("Data frame indexer [" + event.getJobName() + "] schedule has triggered, state: [" + indexer.getState() + "]"); indexer.maybeTriggerAsyncJob(System.currentTimeMillis()); } @@ -298,6 +313,8 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S protected class ClientDataFrameIndexer extends DataFrameIndexer { private static final int LOAD_TRANSFORM_TIMEOUT_IN_SECONDS = 30; + private static final int CREATE_CHECKPOINT_TIMEOUT_IN_SECONDS = 30; + private final Client client; private final DataFrameTransformsConfigManager transformsConfigManager; private final DataFrameTransformsCheckpointService transformsCheckpointService; @@ -413,21 +430,11 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S return; } - if(indexerState.equals(IndexerState.STARTED) && getStats().getNumDocuments() > 0) { - // if the indexer resets the state to started, it means it is done with a run through the data. 
- // But, if there were no documents, we should allow it to attempt to gather more again, as there is no risk of overwriting - // Some reasons for no documents are (but is not limited to): - // * Could have failed early on search or index - // * Have an empty index - // * Have a query that returns no documents - generation.compareAndSet(0L, 1L); - } - final DataFrameTransformState state = new DataFrameTransformState( taskState.get(), indexerState, getPosition(), - generation.get(), + currentCheckpoint.get(), stateReason.get()); logger.info("Updating persistent state of transform [" + transform.getId() + "] to [" + state.toString() + "]"); @@ -480,8 +487,9 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S @Override protected void onFinish(ActionListener listener) { try { - auditor.info(transform.getId(), "Finished indexing for data frame transform"); - logger.info("Finished indexing for data frame transform [" + transform.getId() + "]"); + long checkpoint = currentCheckpoint.incrementAndGet(); + auditor.info(transform.getId(), "Finished indexing for data frame transform checkpoint [" + checkpoint + "]"); + logger.info("Finished indexing for data frame transform [" + transform.getId() + "] checkpoint [" + checkpoint + "]"); listener.onResponse(null); } catch (Exception e) { listener.onFailure(e); @@ -494,6 +502,19 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S logger.info("Data frame transform [" + transform.getId() + "] received abort request, stopping indexer"); shutdown(); } + + @Override + protected void createCheckpoint(ActionListener listener) { + transformsCheckpointService.getCheckpoint(transformConfig, currentCheckpoint.get() + 1, ActionListener.wrap(checkpoint -> { + transformsConfigManager.putTransformCheckpoint(checkpoint, ActionListener.wrap(putCheckPointResponse -> { + listener.onResponse(null); + }, createCheckpointException -> { + listener.onFailure(new RuntimeException("Failed to create checkpoint", createCheckpointException)); + })); + }, getCheckPointException -> { + listener.onFailure(new RuntimeException("Failed to retrieve checkpoint", getCheckPointException)); + })); + } } class DataFrameConfigurationException extends RuntimeException { diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameSingleNodeTestCase.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameSingleNodeTestCase.java similarity index 95% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameSingleNodeTestCase.java rename to x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameSingleNodeTestCase.java index 687e94cdb3a..cdd54d86c7e 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameSingleNodeTestCase.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameSingleNodeTestCase.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.persistence; +package org.elasticsearch.xpack.dataframe; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -15,7 +15,7 @@ import org.elasticsearch.index.reindex.ReindexPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xpack.core.template.TemplateUtils; -import org.elasticsearch.xpack.dataframe.LocalStateDataFrame; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; import java.util.Collection; diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java new file mode 100644 index 00000000000..4bcfef68694 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java @@ -0,0 +1,258 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.checkpoint; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.cache.query.QueryCacheStats; +import org.elasticsearch.index.cache.request.RequestCacheStats; +import org.elasticsearch.index.engine.SegmentsStats; +import org.elasticsearch.index.fielddata.FieldDataStats; +import org.elasticsearch.index.flush.FlushStats; +import org.elasticsearch.index.get.GetStats; +import org.elasticsearch.index.merge.MergeStats; +import org.elasticsearch.index.refresh.RefreshStats; +import org.elasticsearch.index.search.stats.SearchStats; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.DocsStats; +import org.elasticsearch.index.shard.IndexingStats; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.store.StoreStats; +import org.elasticsearch.index.warmer.WarmerStats; +import org.elasticsearch.search.suggest.completion.CompletionStats; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; +import 
org.elasticsearch.xpack.dataframe.DataFrameSingleNodeTestCase; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformCheckpoint; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DataFrameTransformCheckpointServiceNodeTests extends DataFrameSingleNodeTestCase { + + private DataFrameTransformsConfigManager transformsConfigManager; + private MockClientForCheckpointing mockClientForCheckpointing; + private DataFrameTransformsCheckpointService transformsCheckpointService; + + private class MockClientForCheckpointing extends NoOpClient { + + private ShardStats[] shardStats; + private String[] indices; + + MockClientForCheckpointing(String testName) { + super(testName); + } + + public void setShardStats(ShardStats[] shardStats) { + this.shardStats = shardStats; + + Set indices = new HashSet<>(); + for (ShardStats s:shardStats) { + indices.add(s.getShardRouting().getIndexName()); + } + + this.indices = indices.toArray(new String[0]); + } + + @SuppressWarnings("unchecked") + @Override + protected void doExecute(Action action, Request request, + ActionListener listener) { + + if (request instanceof GetIndexRequest) { + // for this test we only need the indices + final GetIndexResponse indexResponse = new GetIndexResponse(indices, null, null, null, null); + + listener.onResponse((Response) indexResponse); + return; + } else if (request instanceof IndicesStatsRequest) { + + // IndicesStatsResponse is package private, therefore using a mock + final IndicesStatsResponse indicesStatsResponse = mock(IndicesStatsResponse.class); + when(indicesStatsResponse.getShards()).thenReturn(shardStats); + when(indicesStatsResponse.getFailedShards()).thenReturn(0); + + + listener.onResponse((Response) indicesStatsResponse); + return; + } + + super.doExecute(action, request, listener); + } + } + + @Before + public void createComponents() { + transformsConfigManager = new DataFrameTransformsConfigManager(client(), xContentRegistry()); + + // use a mock for the checkpoint service + mockClientForCheckpointing = new MockClientForCheckpointing(getTestName()); + transformsCheckpointService = new DataFrameTransformsCheckpointService(mockClientForCheckpointing, transformsConfigManager); + } + + @After + public void tearDownClient() { + mockClientForCheckpointing.close(); + } + + public void testCreateReadDeleteCheckpoint() throws InterruptedException { + String transformId = randomAlphaOfLengthBetween(3, 10); + long timestamp = 1000; + + DataFrameTransformCheckpoint checkpoint = new DataFrameTransformCheckpoint(transformId, timestamp, 1L, + createCheckPointMap(transformId, 10, 10, 10), null); + + // create transform + assertAsync( + listener -> transformsConfigManager + .putTransformConfiguration(DataFrameTransformConfigTests.randomDataFrameTransformConfig(transformId), listener), + true, null, null); + + // by design no exception is thrown but an empty checkpoint is returned + assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(transformId, 1L, listener), + DataFrameTransformCheckpoint.EMPTY, null, null); + + assertAsync(listener -> transformsConfigManager.putTransformCheckpoint(checkpoint, 
listener), true, null, null); + + assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(transformId, 1L, listener), checkpoint, null, null); + + // add a 2nd checkpoint + DataFrameTransformCheckpoint checkpoint2 = new DataFrameTransformCheckpoint(transformId, timestamp + 100L, 2L, + createCheckPointMap(transformId, 20, 20, 20), null); + + assertAsync(listener -> transformsConfigManager.putTransformCheckpoint(checkpoint2, listener), true, null, null); + + // both checkpoints should be there + assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(transformId, 1L, listener), checkpoint, null, null); + assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(transformId, 2L, listener), checkpoint2, null, null); + + // delete transform + assertAsync(listener -> transformsConfigManager.deleteTransform(transformId, listener), true, null, null); + + // checkpoints should be empty again + assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(transformId, 1L, listener), + DataFrameTransformCheckpoint.EMPTY, null, null); + + assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(transformId, 2L, listener), + DataFrameTransformCheckpoint.EMPTY, null, null); + } + + public void testGetCheckpointStats() throws InterruptedException { + String transformId = randomAlphaOfLengthBetween(3, 10); + long timestamp = 1000; + + // create transform + assertAsync( + listener -> transformsConfigManager + .putTransformConfiguration(DataFrameTransformConfigTests.randomDataFrameTransformConfig(transformId), listener), + true, null, null); + + DataFrameTransformCheckpoint checkpoint = new DataFrameTransformCheckpoint(transformId, timestamp, 1L, + createCheckPointMap(transformId, 10, 10, 10), null); + + assertAsync(listener -> transformsConfigManager.putTransformCheckpoint(checkpoint, listener), true, null, null); + + DataFrameTransformCheckpoint checkpoint2 = new DataFrameTransformCheckpoint(transformId, timestamp + 100L, 2L, + createCheckPointMap(transformId, 20, 20, 20), null); + + assertAsync(listener -> transformsConfigManager.putTransformCheckpoint(checkpoint2, listener), true, null, null); + + mockClientForCheckpointing.setShardStats(createShardStats(createCheckPointMap(transformId, 20, 20, 20))); + DataFrameTransformCheckpointingInfo checkpointInfo = new DataFrameTransformCheckpointingInfo( + new DataFrameTransformCheckpointStats(timestamp, 0L), + new DataFrameTransformCheckpointStats(timestamp + 100L, 0L), + 30L); + + assertAsync(listener -> transformsCheckpointService.getCheckpointStats(transformId, 1, 2, listener), checkpointInfo, null, null); + + mockClientForCheckpointing.setShardStats(createShardStats(createCheckPointMap(transformId, 10, 50, 33))); + checkpointInfo = new DataFrameTransformCheckpointingInfo( + new DataFrameTransformCheckpointStats(timestamp, 0L), + new DataFrameTransformCheckpointStats(timestamp + 100L, 0L), + 63L); + assertAsync(listener -> transformsCheckpointService.getCheckpointStats(transformId, 1, 2, listener), checkpointInfo, null, null); + + // same as current + mockClientForCheckpointing.setShardStats(createShardStats(createCheckPointMap(transformId, 10, 10, 10))); + checkpointInfo = new DataFrameTransformCheckpointingInfo( + new DataFrameTransformCheckpointStats(timestamp, 0L), + new DataFrameTransformCheckpointStats(timestamp + 100L, 0L), + 0L); + assertAsync(listener -> transformsCheckpointService.getCheckpointStats(transformId, 1, 2, listener), checkpointInfo, null, null); + } + + private static Map 
createCheckPointMap(String index, long checkpointShard1, long checkpointShard2, + long checkpointShard3) { + return Collections.singletonMap(index, new long[] { checkpointShard1, checkpointShard2, checkpointShard3 }); + } + + private static ShardStats[] createShardStats(Map checkpoints) { + List shardStats = new ArrayList<>(); + + for (Entry entry : checkpoints.entrySet()) { + + for (int i = 0; i < entry.getValue().length; ++i) { + long checkpoint = entry.getValue()[i]; + CommonStats stats = new CommonStats(); + stats.fieldData = new FieldDataStats(); + stats.queryCache = new QueryCacheStats(); + stats.docs = new DocsStats(); + stats.store = new StoreStats(); + stats.indexing = new IndexingStats(); + stats.search = new SearchStats(); + stats.segments = new SegmentsStats(); + stats.merge = new MergeStats(); + stats.refresh = new RefreshStats(); + stats.completion = new CompletionStats(); + stats.requestCache = new RequestCacheStats(); + stats.get = new GetStats(); + stats.flush = new FlushStats(); + stats.warmer = new WarmerStats(); + + SeqNoStats seqNoStats = new SeqNoStats(checkpoint, checkpoint, checkpoint); + Index index = new Index(entry.getKey(), UUIDs.randomBase64UUID(random())); + ShardId shardId = new ShardId(index, i); + ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(i)); + + shardStats.add(new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, seqNoStats, null)); + } + + } + return shardStats.toArray(new ShardStats[0]); + } + +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java index 8fe384553bc..8c59cf00be0 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java @@ -10,10 +10,11 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointTests; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.xpack.dataframe.DataFrameSingleNodeTestCase; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformCheckpoint; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformCheckpointTests; import org.junit.Before; import java.util.Arrays; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java similarity index 50% rename from 
rename to x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java
index 964d08fad20..4124b33ddf1 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java
@@ -4,18 +4,17 @@
  * you may not use this file except in compliance with the Elastic License.
  */
 
-package org.elasticsearch.xpack.core.dataframe.transforms;
+package org.elasticsearch.xpack.dataframe.transforms;
 
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.Writeable.Reader;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.dataframe.transforms.AbstractSerializingDataFrameTestCase;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
@@ -44,11 +43,6 @@ public class DataFrameTransformCheckpointTests extends AbstractSerializingDataFrameTestCase<DataFrameTransformCheckpoint> {
         return DataFrameTransformCheckpoint::new;
     }
 
-    @Override
-    protected ToXContent.Params getToXContentParams() {
-        return TO_XCONTENT_PARAMS;
-    }
-
     public void testXContentForInternalStorage() throws IOException {
         DataFrameTransformCheckpoint dataFrameTransformCheckpoints = randomDataFrameTransformCheckpoints();
 
@@ -58,28 +52,6 @@ public class DataFrameTransformCheckpointTests extends AbstractSerializingDataFrameTestCase<DataFrameTransformCheckpoint> {
             assertThat(doc, matchesPattern(".*\"doc_type\"\\s*:\\s*\"data_frame_transform_checkpoint\".*"));
         }
-
-        try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
-            XContentBuilder content = dataFrameTransformCheckpoints.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
-            String doc = Strings.toString(content);
-
-            assertFalse(doc.contains("doc_type"));
-        }
-    }
-
-    public void testXContentForApiUsage() throws IOException {
-        DataFrameTransformCheckpoint dataFrameTransformCheckpoints = new DataFrameTransformCheckpoint(randomAlphaOfLengthBetween(1, 10),
-            1546300800000L, randomNonNegativeLong(), Collections.emptyMap(), 1545609600000L);
-
-        try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
-            xContentBuilder.humanReadable(true);
-            XContentBuilder content = dataFrameTransformCheckpoints.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
-            String doc = Strings.toString(content);
-            assertThat(doc, matchesPattern(".*\"timestamp_millis\"\\s*:\\s*1546300800000.*"));
-            assertThat(doc, matchesPattern(".*\"time_upper_bound_millis\"\\s*:\\s*1545609600000.*"));
-            assertThat(doc, matchesPattern(".*\"timestamp\"\\s*:\\s*\"2019-01-01T00:00:00.000Z\".*"));
-            assertThat(doc, matchesPattern(".*\"time_upper_bound\"\\s*:\\s*\"2018-12-24T00:00:00.000Z\".*"));
-        }
     }
 
     public void testMatches() throws IOException {
@@ -119,12 +91,90 @@ public class DataFrameTransformCheckpointTests extends AbstractSerializingDataFrameTestCase<DataFrameTransformCheckpoint> {
             .matches(new DataFrameTransformCheckpoint(id, timestamp, checkpoint, checkpointsByIndex, (timeUpperBound / 2) + 1)));
     }
 
+    public void testGetBehind() {
+        String id = randomAlphaOfLengthBetween(1, 10);
+        long timestamp = randomNonNegativeLong();
+
+        TreeMap<String, long[]> checkpointsByIndexOld = new TreeMap<>();
+        TreeMap<String, long[]> checkpointsByIndexNew = new TreeMap<>();
+
+        int indices = randomIntBetween(3, 10);
+        int shards = randomIntBetween(1, 20);
+
+        for (int i = 0; i < indices; ++i) {
+            List<Long> checkpoints1 = new ArrayList<>();
+            List<Long> checkpoints2 = new ArrayList<>();
+
+            for (int j = 0; j < shards; ++j) {
+                long shardCheckpoint = randomLongBetween(0, 1_000_000);
+                checkpoints1.add(shardCheckpoint);
+                checkpoints2.add(shardCheckpoint + 10);
+            }
+
+            String indexName = randomAlphaOfLengthBetween(1, 10);
+
+            checkpointsByIndexOld.put(indexName, checkpoints1.stream().mapToLong(l -> l).toArray());
+            checkpointsByIndexNew.put(indexName, checkpoints2.stream().mapToLong(l -> l).toArray());
+        }
+
+        long checkpoint = randomLongBetween(10, 100);
+
+        DataFrameTransformCheckpoint checkpointOld = new DataFrameTransformCheckpoint(
+                id, timestamp, checkpoint, checkpointsByIndexOld, 0L);
+        DataFrameTransformCheckpoint checkpointTransientNew = new DataFrameTransformCheckpoint(
+                id, timestamp, -1L, checkpointsByIndexNew, 0L);
+        DataFrameTransformCheckpoint checkpointNew = new DataFrameTransformCheckpoint(
+                id, timestamp, checkpoint + 1, checkpointsByIndexNew, 0L);
+        DataFrameTransformCheckpoint checkpointOlderButNewerShardsCheckpoint = new DataFrameTransformCheckpoint(
+                id, timestamp, checkpoint - 1, checkpointsByIndexNew, 0L);
+
+        assertEquals(indices * shards * 10L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew));
+        assertEquals(indices * shards * 10L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointNew));
+
+        // no difference for same checkpoints, transient or not
+        assertEquals(0L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointOld));
+        assertEquals(0L, DataFrameTransformCheckpoint.getBehind(checkpointTransientNew, checkpointTransientNew));
+        assertEquals(0L, DataFrameTransformCheckpoint.getBehind(checkpointNew, checkpointNew));
+
+        // new vs transient new: ok
+        assertEquals(0L, DataFrameTransformCheckpoint.getBehind(checkpointNew, checkpointTransientNew));
+
+        // transient new vs new: illegal
+        Exception e = expectThrows(IllegalArgumentException.class,
+                () -> DataFrameTransformCheckpoint.getBehind(checkpointTransientNew, checkpointNew));
+        assertEquals("can not compare transient against a non transient checkpoint", e.getMessage());
+
+        // new vs old: illegal
+        e = expectThrows(IllegalArgumentException.class, () -> DataFrameTransformCheckpoint.getBehind(checkpointNew, checkpointOld));
+        assertEquals("old checkpoint is newer than new checkpoint", e.getMessage());
+
+        // corner case: the checkpoint appears older but the inner shard checkpoints are newer
+        assertEquals(-1L, DataFrameTransformCheckpoint.getBehind(checkpointOlderButNewerShardsCheckpoint, checkpointOld));
+
+        // test cases where indices sets do not match
+        // remove something from old, so newer has 1 index more than old
+        checkpointsByIndexOld.remove(checkpointsByIndexOld.firstKey());
+        long behind = DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew);
+        assertTrue("Expected behind (" + behind + ") > sum of shard checkpoints (" + indices * shards * 10L + ")",
+                behind > indices * shards * 10L);
+
+        // remove same key: old and new should have equal indices again
+        checkpointsByIndexNew.remove(checkpointsByIndexNew.firstKey());
+        assertEquals((indices - 1) * shards * 10L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew));
+
+        // remove 1st index from new, now old has 1 index more, behind can not be calculated
+        checkpointsByIndexNew.remove(checkpointsByIndexNew.firstKey());
+        assertEquals(-1L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew));
+    }
+
     private static Map<String, long[]> randomCheckpointsByIndex() {
         Map<String, long[]> checkpointsByIndex = new TreeMap<>();
-        for (int i = 0; i < randomIntBetween(1, 10); ++i) {
+        int indices = randomIntBetween(1, 10);
+        for (int i = 0; i < indices; ++i) {
             List<Long> checkpoints = new ArrayList<>();
-            for (int j = 0; j < randomIntBetween(1, 20); ++j) {
-                checkpoints.add(randomNonNegativeLong());
+            int shards = randomIntBetween(1, 20);
+            for (int j = 0; j < shards; ++j) {
+                checkpoints.add(randomLongBetween(0, 1_000_000));
             }
             checkpointsByIndex.put(randomAlphaOfLengthBetween(1, 10), checkpoints.stream().mapToLong(l -> l).toArray());
         }
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml
index 88f1e43fd11..24533098537 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml
@@ -48,7 +48,7 @@ teardown:
   - match: { transforms.0.id: "airline-transform-stats" }
   - match: { transforms.0.state.indexer_state: "started" }
   - match: { transforms.0.state.task_state: "started" }
-  - match: { transforms.0.state.generation: 0 }
+  - match: { transforms.0.state.checkpoint: 0 }
   - match: { transforms.0.stats.pages_processed: 0 }
   - match: { transforms.0.stats.documents_processed: 0 }
   - match: { transforms.0.stats.documents_indexed: 0 }
@@ -196,7 +196,7 @@ teardown:
   - match: { count: 1 }
   - match: { transforms.0.id: "airline-transform-stats-dos" }
   - match: { transforms.0.state.indexer_state: "stopped" }
-  - match: { transforms.0.state.generation: 0 }
+  - match: { transforms.0.state.checkpoint: 0 }
   - match: { transforms.0.stats.pages_processed: 0 }
   - match: { transforms.0.stats.documents_processed: 0 }
   - match: { transforms.0.stats.documents_indexed: 0 }

From 683cf56982a389d5599b1dcc425f18583d454cea Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Wed, 10 Apr 2019 11:33:49 +0200
Subject: [PATCH 26/60] Update headline of the "removal of types" doc page to
 match changes in 7.0. (#40868)

Currently it describes what broke in 6.0.
---
 docs/reference/mapping/removal_of_types.asciidoc | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc
index ab6bfb39d8d..a5b0a4e8941 100644
--- a/docs/reference/mapping/removal_of_types.asciidoc
+++ b/docs/reference/mapping/removal_of_types.asciidoc
@@ -1,11 +1,11 @@
 [[removal-of-types]]
 == Removal of mapping types
 
-IMPORTANT: Indices created in Elasticsearch 6.0.0 or later may only contain a
-single <<mapping-type,mapping type>>. Indices created in 5.x with multiple
-mapping types will continue to function as before in Elasticsearch 6.x.
-Types will be deprecated in APIs in Elasticsearch 7.0.0, and completely
-removed in 8.0.0.
+IMPORTANT: Indices created in Elasticsearch 7.0.0 or later no longer accept a
+`_default_` mapping. Indices created in 6.x will continue to function as before
+in Elasticsearch 6.x. Types are deprecated in APIs in 7.0, with breaking changes
+to the index creation, put mapping, get mapping, put template, get template and
+get field mappings APIs.
 
 [float]
 === What are mapping types?
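To make the updated headline concrete: from 7.0 onwards an index is created with its mapping nested directly under `mappings`, with no type name in between. Below is a minimal sketch using the low-level REST client; the endpoint, the index name `my-index` and the `title` field are illustrative placeholders, not taken from this patch.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class TypelessIndexCreationSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint; point this at a real 7.x cluster.
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/my-index");
            // In 7.0 the mapping goes directly under "mappings"; sending a type
            // name instead requires the deprecated include_type_name parameter.
            request.setJsonEntity("{ \"mappings\": { \"properties\": { \"title\": { \"type\": \"text\" } } } }");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}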
From ecbb588a140eb01a937e9a1dc59bf5f740c12da7 Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Wed, 10 Apr 2019 11:15:45 +0200
Subject: [PATCH 27/60] Fix command line to run doc tests in docs/README. (#40959)

It runs `gradle` instead of `gradlew` and has an extra `\`.
---
 docs/README.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/README.asciidoc b/docs/README.asciidoc
index 176d3324cd1..5aca8cb56c2 100644
--- a/docs/README.asciidoc
+++ b/docs/README.asciidoc
@@ -6,7 +6,7 @@ See: https://github.com/elastic/docs
 Snippets marked with `// CONSOLE` are automatically annotated with "VIEW IN CONSOLE"
 and "COPY AS CURL" in the documentation and are automatically tested by the
 command `gradle :docs:check`. To test just the docs from a single page,
-use e.g. `gradle :docs:check -Dtests.method="\*rollover*"`.
+use e.g. `./gradlew :docs:integTestRunner --tests "*rollover*"`.
 
 NOTE: If you have an elasticsearch-extra folder alongside your elasticsearch
 folder, you must temporarily rename it when you are testing 6.3 or later branches.

From 4263a28039f73ee579da70bb095ed3b06b1b5138 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Wed, 10 Apr 2019 11:37:37 +0200
Subject: [PATCH 28/60] Fix rewrite of inner queries in DisMaxQueryBuilder (#40956)

This commit implements the missing rewrite for the DisMaxQueryBuilder.

Closes #40953
---
 .../index/query/DisMaxQueryBuilder.java      | 21 +++++++++++++++++++
 .../index/query/DisMaxQueryBuilderTests.java | 15 +++++++++++++
 2 files changed, 36 insertions(+)

diff --git a/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java
index 0e2a19e2b07..0fbd4cd4fc5 100644
--- a/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java
@@ -190,6 +190,27 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
         return new DisjunctionMaxQuery(luceneQueries, tieBreaker);
     }
 
+    @Override
+    protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException {
+        DisMaxQueryBuilder newBuilder = new DisMaxQueryBuilder();
+        boolean changed = false;
+        for (QueryBuilder query : queries) {
+            QueryBuilder result = query.rewrite(queryShardContext);
+            if (result != query) {
+                changed = true;
+            }
+            newBuilder.add(result);
+        }
+        if (changed) {
+            newBuilder.queryName(queryName);
+            newBuilder.boost(boost);
+            newBuilder.tieBreaker(tieBreaker);
+            return newBuilder;
+        } else {
+            return this;
+        }
+    }
+
     @Override
     protected int doHashCode() {
         return Objects.hash(queries, tieBreaker);
diff --git a/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java
index ef98c67e56e..7dc97a66a5a 100644
--- a/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java
@@ -151,4 +151,19 @@ public class DisMaxQueryBuilderTests extends AbstractQueryTestCase<DisMaxQueryBuilder>

Date: Wed, 10 Apr 2019 13:14:18 +0300
Subject: [PATCH 29/60] SQL: Change schema calls to empty set (#41034)

As an empty string has a certain meaning, the JDBC driver returns an empty set
instead, for better client compatibility.
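To illustrate the change from a client's perspective, here is a minimal sketch assuming the Elasticsearch SQL JDBC driver is on the classpath; the connection URL is a placeholder.

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class GetSchemasSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; adjust host and port for a real cluster.
        try (Connection con = DriverManager.getConnection("jdbc:es://localhost:9200")) {
            DatabaseMetaData md = con.getMetaData();
            // Previously this returned a single row with an empty schema name;
            // after this change it returns no rows at all.
            try (ResultSet schemas = md.getSchemas()) {
                while (schemas.next()) {
                    System.out.println(schemas.getString("TABLE_SCHEM"));
                }
            }
        }
    }
}

Both the no-argument variant and `getSchemas(String, String)` now behave identically, since the latter simply delegates to the former.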
Fix #41028

(cherry picked from commit 4cbafa585b7a514eb6c156606dd516324cd3980a)
---
 .../xpack/sql/jdbc/JdbcDatabaseMetaData.java  | 16 ++++------------
 .../sql/jdbc/JdbcDatabaseMetaDataTests.java   |  5 +++++
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java
index 4d5b6eae2e1..8fd4417f4a9 100644
--- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java
+++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java
@@ -754,22 +754,14 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
 
     @Override
     public ResultSet getSchemas() throws SQLException {
-        Object[][] data = { { EMPTY, defaultCatalog() } };
-        return memorySet(con.cfg, columnInfo("SCHEMATA",
-                "TABLE_SCHEM",
-                "TABLE_CATALOG"), data);
+        return emptySet(con.cfg, "SCHEMATA",
+                "TABLE_SCHEM",
+                "TABLE_CATALOG");
     }
 
     @Override
     public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
-        List<JdbcColumnInfo> info = columnInfo("SCHEMATA",
-                "TABLE_SCHEM",
-                "TABLE_CATALOG");
-        if (!isDefaultCatalog(catalog) || !isDefaultSchema(schemaPattern)) {
-            return emptySet(con.cfg, info);
-        }
-        Object[][] data = { { EMPTY, defaultCatalog() } };
-        return memorySet(con.cfg, info, data);
+        return getSchemas();
     }
 
     @Override
diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java
index 065807117ea..2ce98f8446a 100644
--- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java
+++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java
@@ -104,6 +104,11 @@ public class JdbcDatabaseMetaDataTests extends ESTestCase {
         testEmptySet(() -> md.getPseudoColumns(null, null, null, null));
     }
 
+    public void testGetSchemas() throws Exception {
+        testEmptySet(() -> md.getSchemas());
+        testEmptySet(() -> md.getSchemas(null, null));
+    }
+
     private static void testEmptySet(CheckedSupplier<ResultSet, SQLException> supplier) throws SQLException {
         try (ResultSet result = supplier.get()) {
             assertNotNull(result);

From adf3393a4eb6b2d90bf298df1e0793795ea007c2 Mon Sep 17 00:00:00 2001
From: Albert Zaharovits
Date: Wed, 10 Apr 2019 15:02:33 +0300
Subject: [PATCH 30/60] Deprecate permission over aliases (#38059) (#41060)

This PR generates deprecation log entries for each Role Descriptor, used for
building a Role, when the Role Descriptor grants more privileges over an alias
than over an index that the alias points to. This is done in preparation for
the removal of the ability to define privileges over aliases. There is one log
entry for each "role descriptor name"-"alias name" pair. On such a notice, the
administrator is expected to modify the Role Descriptor definition so that the
name pattern for index names does not cover aliases.

Caveats:
* Role Descriptors that are not used in any authorization process, either
because they are not mapped to any user or because the user they are mapped to
is not used by clients, are not checked.
* Role Descriptors are merged when building the effective Role that is used in
the authorization process.
Therefore some Role Descriptors can overlap others, so even if one matches
aliases in a deprecated way, and is reported as such, it might not actually be
at risk from the breaking behavior under the current role mapping and
index-alias configuration. It is still reported, because it is a best practice
to change its definition, or to remove the offending aliases.
---
 .../authz/permission/IndicesPermission.java   |   2 +-
 .../xpack/security/Security.java              |   5 +-
 .../authz/store/CompositeRolesStore.java      |  16 +-
 .../DeprecationRoleDescriptorConsumer.java    | 222 +++++++++++++
 .../authz/store/CompositeRolesStoreTests.java | 147 +++++++--
 ...eprecationRoleDescriptorConsumerTests.java | 304 ++++++++++++++++++
 6 files changed, 655 insertions(+), 41 deletions(-)
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/DeprecationRoleDescriptorConsumer.java
 create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/DeprecationRoleDescriptorConsumerTests.java

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java
index ed56218f71b..356e80c4975 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java
@@ -53,7 +53,7 @@ public final class IndicesPermission {
         this.groups = groups;
     }
 
-    static Predicate<String> indexMatcher(Collection<String> indices) {
+    public static Predicate<String> indexMatcher(Collection<String> indices) {
         Set<String> exactMatch = new HashSet<>();
         List<String> nonExactMatch = new ArrayList<>();
         for (String indexPattern : indices) {
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java
index f5c1aa81908..33de1c90f03 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java
@@ -182,6 +182,7 @@ import org.elasticsearch.xpack.security.authz.interceptor.ResizeRequestInterceptor;
 import org.elasticsearch.xpack.security.authz.interceptor.SearchRequestInterceptor;
 import org.elasticsearch.xpack.security.authz.interceptor.UpdateRequestInterceptor;
 import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore;
+import org.elasticsearch.xpack.security.authz.store.DeprecationRoleDescriptorConsumer;
 import org.elasticsearch.xpack.security.authz.store.FileRolesStore;
 import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore;
 import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
@@ -443,8 +444,10 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, NetworkPlugin,
             threadPool);
         components.add(apiKeyService);
         final CompositeRolesStore allRolesStore = new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore,
-            privilegeStore, rolesProviders, threadPool.getThreadContext(), getLicenseState(), fieldPermissionsCache, apiKeyService);
+            privilegeStore, rolesProviders, threadPool.getThreadContext(), getLicenseState(), fieldPermissionsCache, apiKeyService,
+            new DeprecationRoleDescriptorConsumer(clusterService, threadPool));
         securityIndex.get().addIndexStateListener(allRolesStore::onSecurityIndexStateChange);
+
         // to keep things simple, just invalidate all cached entries on license change. this happens so rarely that the impact should be
         // minimal
         getLicenseState().addListener(allRolesStore::invalidateAll);
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java
index 48659b89686..781072b95a2 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java
@@ -62,6 +62,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.BiConsumer;
+import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
@@ -100,6 +101,7 @@ public class CompositeRolesStore {
     private final NativeRolesStore nativeRolesStore;
     private final NativePrivilegeStore privilegeStore;
     private final XPackLicenseState licenseState;
+    private final Consumer<Collection<RoleDescriptor>> effectiveRoleDescriptorsConsumer;
     private final FieldPermissionsCache fieldPermissionsCache;
     private final Cache<RoleKey, Role> roleCache;
     private final Cache<String, Boolean> negativeLookupCache;
@@ -115,7 +117,7 @@ public class CompositeRolesStore {
                                ReservedRolesStore reservedRolesStore, NativePrivilegeStore privilegeStore,
                                List<BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>>> rolesProviders,
                                ThreadContext threadContext, XPackLicenseState licenseState, FieldPermissionsCache fieldPermissionsCache,
-                               ApiKeyService apiKeyService) {
+                               ApiKeyService apiKeyService, Consumer<Collection<RoleDescriptor>> effectiveRoleDescriptorsConsumer) {
         this.fileRolesStore = fileRolesStore;
         fileRolesStore.addListener(this::invalidate);
         this.nativeRolesStore = nativeRolesStore;
@@ -123,6 +125,7 @@ public class CompositeRolesStore {
         this.licenseState = licenseState;
         this.fieldPermissionsCache = fieldPermissionsCache;
         this.apiKeyService = apiKeyService;
+        this.effectiveRoleDescriptorsConsumer = effectiveRoleDescriptorsConsumer;
         CacheBuilder<RoleKey, Role> builder = CacheBuilder.builder();
         final int cacheSize = CACHE_SIZE_SETTING.get(settings);
         if (cacheSize >= 0) {
@@ -161,9 +164,9 @@ public class CompositeRolesStore {
             rolesRetrievalResult -> {
                 final boolean missingRoles = rolesRetrievalResult.getMissingRoles().isEmpty() == false;
                 if (missingRoles) {
-                    logger.debug("Could not find roles with names {}", rolesRetrievalResult.getMissingRoles());
+                    logger.debug(() -> new ParameterizedMessage("Could not find roles with names {}",
+                            rolesRetrievalResult.getMissingRoles()));
                 }
-
                 final Set<RoleDescriptor> effectiveDescriptors;
                 if (licenseState.isDocumentAndFieldLevelSecurityAllowed()) {
                     effectiveDescriptors = rolesRetrievalResult.getRoleDescriptors();
@@ -172,6 +175,11 @@ public class CompositeRolesStore {
                             .filter((rd) -> rd.isUsingDocumentOrFieldLevelSecurity() == false)
                             .collect(Collectors.toSet());
                 }
+                logger.trace(() -> new ParameterizedMessage("Exposing effective role descriptors [{}] for role names [{}]",
+                        effectiveDescriptors, roleNames));
+                effectiveRoleDescriptorsConsumer.accept(Collections.unmodifiableCollection(effectiveDescriptors));
+                logger.trace(() -> new ParameterizedMessage("Building role from descriptors [{}] for role names [{}]",
+                        effectiveDescriptors, roleNames));
                 buildThenMaybeCacheRole(roleKey, effectiveDescriptors, rolesRetrievalResult.getMissingRoles(),
                         rolesRetrievalResult.isSuccess(), invalidationCounter, roleActionListener);
             },
@@ -287,7 +295,7 @@ private void roleDescriptors(Set<String> roleNames, ActionListener<RoleRetrievalResult> rolesResultListener) {
         final Set<String> filteredRoleNames = roleNames.stream().filter((s) -> {
             if (negativeLookupCache.get(s) != null) {
-                logger.debug("Requested role [{}] does not exist (cached)", s);
+                logger.debug(() -> new ParameterizedMessage("Requested role [{}] does not exist (cached)", s));
                 return false;
             } else {
                 return true;
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/DeprecationRoleDescriptorConsumer.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/DeprecationRoleDescriptorConsumer.java
new file mode 100644
index 00000000000..e3be8e09729
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/DeprecationRoleDescriptorConsumer.java
@@ -0,0 +1,222 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.security.authz.store;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.Operations;
+import org.elasticsearch.cluster.metadata.AliasOrIndex;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
+import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges;
+import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission;
+import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege;
+
+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Queue;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+
+/**
+ * Inspects all aliases that have greater privileges than the indices that they point to and logs the role descriptor, granting privileges
+ * in this manner, as deprecated and requiring changes. This is done in preparation for the removal of the ability to define privileges
+ * over aliases. The log messages are generated asynchronously and do not generate deprecation response headers. One log entry is
+ * generated for each role descriptor and alias pair, and it contains all the indices for which privileges are a subset of those of the
+ * alias.
+ * In this case, the administrator has to adjust the index privileges definition of the respective role such that name patterns do not
+ * cover aliases (or rename aliases). If no logging is generated then the roles used for the current indices and aliases are not
+ * vulnerable to the subsequent breaking change. However, there could be role descriptors that are not used (not mapped to a user that is
+ * currently using the system) which are invisible to this check. Moreover, role descriptors can be dynamically added by role providers.
+ * In addition, role descriptors are merged when building the effective role, so a role-alias pair reported as deprecated might not
+ * actually have an impact if other role descriptors cover its indices. The check iterates over all indices and aliases for each role
+ * descriptor so it is quite expensive computationally. For this reason the check is done only once a day for each role. If the role
+ * definitions stay the same, the deprecations can change from one day to another only if aliases or indices are added.
+ */
+public final class DeprecationRoleDescriptorConsumer implements Consumer<Collection<RoleDescriptor>> {
+
+    private static final String ROLE_PERMISSION_DEPRECATION_STANZA = "Role [%s] contains index privileges covering the [%s] alias but"
+            + " which do not cover some of the indices that it points to [%s]. Granting privileges over an alias and hence granting"
+            + " privileges over all the indices that the alias points to is deprecated and will be removed in a future version of"
+            + " Elasticsearch. Instead define permissions exclusively on index names or index name patterns.";
+
+    private static final Logger logger = LogManager.getLogger(DeprecationRoleDescriptorConsumer.class);
+
+    private final DeprecationLogger deprecationLogger;
+    private final ClusterService clusterService;
+    private final ThreadPool threadPool;
+    private final Object mutex;
+    private final Queue<RoleDescriptor> workQueue;
+    private boolean workerBusy;
+    private final Set<String> dailyRoleCache;
+
+    public DeprecationRoleDescriptorConsumer(ClusterService clusterService, ThreadPool threadPool) {
+        this(clusterService, threadPool, new DeprecationLogger(logger));
+    }
+
+    // package-private for testing
+    DeprecationRoleDescriptorConsumer(ClusterService clusterService, ThreadPool threadPool, DeprecationLogger deprecationLogger) {
+        this.deprecationLogger = deprecationLogger;
+        this.clusterService = clusterService;
+        this.threadPool = threadPool;
+        this.mutex = new Object();
+        this.workQueue = new LinkedList<>();
+        this.workerBusy = false;
+        // this String Set keeps "<date>-<role>" pairs so that we only log a role once a day.
+        this.dailyRoleCache = Collections.newSetFromMap(new LinkedHashMap<String, Boolean>() {
+            @Override
+            protected boolean removeEldestEntry(final Map.Entry<String, Boolean> eldest) {
+                return false == eldest.getKey().startsWith(todayISODate());
+            }
+        });
+    }
+
+    @Override
+    public void accept(Collection<RoleDescriptor> effectiveRoleDescriptors) {
+        synchronized (mutex) {
+            for (RoleDescriptor roleDescriptor : effectiveRoleDescriptors) {
+                if (dailyRoleCache.add(buildCacheKey(roleDescriptor))) {
+                    workQueue.add(roleDescriptor);
+                }
+            }
+            if (false == workerBusy) {
+                workerBusy = true;
+                try {
+                    // spawn another worker on the generic thread pool
+                    threadPool.generic().execute(new AbstractRunnable() {
+
+                        @Override
+                        public void onFailure(Exception e) {
+                            logger.warn("Failed to produce role deprecation messages", e);
+                            synchronized (mutex) {
+                                final boolean hasMoreWork = workQueue.peek() != null;
+                                if (hasMoreWork) {
+                                    workerBusy = true; // just being paranoid :)
+                                    try {
+                                        threadPool.generic().execute(this);
+                                    } catch (RejectedExecutionException e1) {
+                                        workerBusy = false;
+                                        logger.warn("Failed to start working on role alias permission deprecation messages", e1);
+                                    }
+                                } else {
+                                    workerBusy = false;
+                                }
+                            }
+                        }
+
+                        @Override
+                        protected void doRun() throws Exception {
+                            while (true) {
+                                final RoleDescriptor workItem;
+                                synchronized (mutex) {
+                                    workItem = workQueue.poll();
+                                    if (workItem == null) {
+                                        workerBusy = false;
+                                        break;
+                                    }
+                                }
+                                logger.trace("Begin role [" + workItem.getName() + "] check for alias permission deprecation");
+                                // executing the check asynchronously will not conserve the generated deprecation response headers (which
+                                // is what we want, because it's not the request that uses deprecated features, but rather the role
+                                // definition. Furthermore, due to caching, we can't reliably associate response headers to every request).
+                                logDeprecatedPermission(workItem);
+                                logger.trace("Completed role [" + workItem.getName() + "] check for alias permission deprecation");
+                            }
+                        }
+                    });
+                } catch (RejectedExecutionException e) {
+                    workerBusy = false;
+                    logger.warn("Failed to start working on role alias permission deprecation messages", e);
+                }
+            }
+        }
+    }
+
+    private void logDeprecatedPermission(RoleDescriptor roleDescriptor) {
+        final SortedMap<String, AliasOrIndex> aliasOrIndexMap = clusterService.state().metaData().getAliasAndIndexLookup();
+        final Map<String, Set<String>> privilegesByAliasMap = new HashMap<>();
+        // sort answer by alias for tests
+        final SortedMap<String, Set<String>> privilegesByIndexMap = new TreeMap<>();
+        // collate privileges by index and by alias separately
+        for (final IndicesPrivileges indexPrivilege : roleDescriptor.getIndicesPrivileges()) {
+            final Predicate<String> namePatternPredicate = IndicesPermission.indexMatcher(Arrays.asList(indexPrivilege.getIndices()));
+            for (final Map.Entry<String, AliasOrIndex> aliasOrIndex : aliasOrIndexMap.entrySet()) {
+                final String aliasOrIndexName = aliasOrIndex.getKey();
+                if (namePatternPredicate.test(aliasOrIndexName)) {
+                    if (aliasOrIndex.getValue().isAlias()) {
+                        final Set<String> privilegesByAlias = privilegesByAliasMap.computeIfAbsent(aliasOrIndexName,
+                                k -> new HashSet<String>());
+                        privilegesByAlias.addAll(Arrays.asList(indexPrivilege.getPrivileges()));
+                    } else {
+                        final Set<String> privilegesByIndex = privilegesByIndexMap.computeIfAbsent(aliasOrIndexName,
+                                k -> new HashSet<String>());
+                        privilegesByIndex.addAll(Arrays.asList(indexPrivilege.getPrivileges()));
+                    }
+                }
+            }
+        }
+        // compute privileges Automaton for each alias and for each of the indices it points to
+        final Map<String, Automaton> indexAutomatonMap = new HashMap<>();
+        for (Map.Entry<String, Set<String>> privilegesByAlias : privilegesByAliasMap.entrySet()) {
+            final String aliasName = privilegesByAlias.getKey();
+            final Set<String> aliasPrivilegeNames = privilegesByAlias.getValue();
+            final Automaton aliasPrivilegeAutomaton = IndexPrivilege.get(aliasPrivilegeNames).getAutomaton();
+            final SortedSet<String> inferiorIndexNames = new TreeSet<>();
+            // check if the alias grants superior privileges compared to the indices it points to
+            for (IndexMetaData indexMetadata : aliasOrIndexMap.get(aliasName).getIndices()) {
+                final String indexName = indexMetadata.getIndex().getName();
+                final Set<String> indexPrivileges = privilegesByIndexMap.get(indexName);
+                // null iff the index does not have *any* privilege
+                if (indexPrivileges != null) {
+                    // compute automaton once per index no matter how many times it is pointed to
+                    final Automaton indexPrivilegeAutomaton = indexAutomatonMap.computeIfAbsent(indexName,
+                            i -> IndexPrivilege.get(indexPrivileges).getAutomaton());
+                    if (false == Operations.subsetOf(indexPrivilegeAutomaton, aliasPrivilegeAutomaton)) {
+                        inferiorIndexNames.add(indexName);
+                    }
+                } else {
+                    inferiorIndexNames.add(indexName);
+                }
+            }
+            // log inferior indices for this role, for this alias
+            if (false == inferiorIndexNames.isEmpty()) {
+                final String logMessage = String.format(Locale.ROOT, ROLE_PERMISSION_DEPRECATION_STANZA, roleDescriptor.getName(),
+                        aliasName, String.join(", ", inferiorIndexNames));
+                deprecationLogger.deprecated(logMessage);
+            }
+        }
+    }
+
+    private static String todayISODate() {
+        return ZonedDateTime.now(ZoneOffset.UTC).format(DateTimeFormatter.BASIC_ISO_DATE);
+    }
+
+    // package-private for testing
+    static String buildCacheKey(RoleDescriptor roleDescriptor) {
+        return todayISODate() + "-" + roleDescriptor.getName();
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
index f17442ca846..2278f27a6df 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
@@ -66,6 +66,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -74,9 +75,11 @@ import java.util.function.Predicate;
 import static org.elasticsearch.mock.orig.Mockito.times;
 import static org.elasticsearch.mock.orig.Mockito.verifyNoMoreInteractions;
 import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasItem;
-import static org.hamcrest.Matchers.is;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anySetOf;
 import static org.mockito.Matchers.eq;
@@ -143,25 +146,35 @@ public class CompositeRolesStoreTests extends ESTestCase {
         when(fileRolesStore.roleDescriptors(Collections.singleton("dls"))).thenReturn(Collections.singleton(dlsRole));
         when(fileRolesStore.roleDescriptors(Collections.singleton("fls_dls"))).thenReturn(Collections.singleton(flsDlsRole));
         when(fileRolesStore.roleDescriptors(Collections.singleton("no_fls_dls"))).thenReturn(Collections.singleton(noFlsDlsRole));
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         CompositeRolesStore compositeRolesStore = new CompositeRolesStore(Settings.EMPTY, fileRolesStore, nativeRolesStore,
                 reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(),
-                new ThreadContext(Settings.EMPTY), licenseState, cache, mock(ApiKeyService.class));
+                new ThreadContext(Settings.EMPTY), licenseState, cache, mock(ApiKeyService.class),
+                rds -> effectiveRoleDescriptors.set(rds));
 
         PlainActionFuture<Role> roleFuture = new PlainActionFuture<>();
         compositeRolesStore.roles(Collections.singleton("fls"), roleFuture);
         assertEquals(Role.EMPTY, roleFuture.actionGet());
+        assertThat(effectiveRoleDescriptors.get().isEmpty(), is(true));
+        effectiveRoleDescriptors.set(null);
 
         roleFuture = new PlainActionFuture<>();
         compositeRolesStore.roles(Collections.singleton("dls"), roleFuture);
         assertEquals(Role.EMPTY, roleFuture.actionGet());
+        assertThat(effectiveRoleDescriptors.get().isEmpty(), is(true));
+        effectiveRoleDescriptors.set(null);
 
         roleFuture = new PlainActionFuture<>();
         compositeRolesStore.roles(Collections.singleton("fls_dls"), roleFuture);
         assertEquals(Role.EMPTY, roleFuture.actionGet());
+        assertThat(effectiveRoleDescriptors.get().isEmpty(), is(true));
+        effectiveRoleDescriptors.set(null);
 
         roleFuture = new PlainActionFuture<>();
         compositeRolesStore.roles(Collections.singleton("no_fls_dls"), roleFuture);
         assertNotEquals(Role.EMPTY, roleFuture.actionGet());
+        assertThat(effectiveRoleDescriptors.get(), containsInAnyOrder(noFlsDlsRole));
+        effectiveRoleDescriptors.set(null);
     }
 
     public void testRolesWhenDlsFlsLicensed() throws IOException {
@@ -208,25 +221,35 @@ public class CompositeRolesStoreTests extends ESTestCase {
         when(fileRolesStore.roleDescriptors(Collections.singleton("dls"))).thenReturn(Collections.singleton(dlsRole));
         when(fileRolesStore.roleDescriptors(Collections.singleton("fls_dls"))).thenReturn(Collections.singleton(flsDlsRole));
         when(fileRolesStore.roleDescriptors(Collections.singleton("no_fls_dls"))).thenReturn(Collections.singleton(noFlsDlsRole));
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         CompositeRolesStore compositeRolesStore = new CompositeRolesStore(Settings.EMPTY, fileRolesStore, nativeRolesStore,
                 reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(),
-                new ThreadContext(Settings.EMPTY), licenseState, cache, mock(ApiKeyService.class));
+                new ThreadContext(Settings.EMPTY), licenseState, cache, mock(ApiKeyService.class),
+                rds -> effectiveRoleDescriptors.set(rds));
 
         PlainActionFuture<Role> roleFuture = new PlainActionFuture<>();
         compositeRolesStore.roles(Collections.singleton("fls"), roleFuture);
         assertNotEquals(Role.EMPTY, roleFuture.actionGet());
+        assertThat(effectiveRoleDescriptors.get(), containsInAnyOrder(flsRole));
+        effectiveRoleDescriptors.set(null);
 
         roleFuture = new PlainActionFuture<>();
         compositeRolesStore.roles(Collections.singleton("dls"), roleFuture);
         assertNotEquals(Role.EMPTY, roleFuture.actionGet());
+        assertThat(effectiveRoleDescriptors.get(), containsInAnyOrder(dlsRole));
+        effectiveRoleDescriptors.set(null);
 
         roleFuture = new PlainActionFuture<>();
        compositeRolesStore.roles(Collections.singleton("fls_dls"), roleFuture);
         assertNotEquals(Role.EMPTY, roleFuture.actionGet());
+        assertThat(effectiveRoleDescriptors.get(), containsInAnyOrder(flsDlsRole));
+        effectiveRoleDescriptors.set(null);
 
         roleFuture = new PlainActionFuture<>();
         compositeRolesStore.roles(Collections.singleton("no_fls_dls"), roleFuture);
         assertNotEquals(Role.EMPTY, roleFuture.actionGet());
+        assertThat(effectiveRoleDescriptors.get(), containsInAnyOrder(noFlsDlsRole));
+        effectiveRoleDescriptors.set(null);
     }
 
     public void testNegativeLookupsAreCached() {
@@ -249,16 +272,20 @@ public class CompositeRolesStoreTests extends ESTestCase {
             return null;
         }).when(nativePrivilegeStore).getPrivileges(isA(Set.class), isA(Set.class), any(ActionListener.class));
 
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore,
                 nativeRolesStore, reservedRolesStore, nativePrivilegeStore, Collections.emptyList(),
                 new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class));
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class),
+                rds -> effectiveRoleDescriptors.set(rds));
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor
 
         final String roleName = randomAlphaOfLengthBetween(1, 10);
         PlainActionFuture<Role> future = new PlainActionFuture<>();
         compositeRolesStore.roles(Collections.singleton(roleName), future);
         final Role role = future.actionGet();
+        assertThat(effectiveRoleDescriptors.get().isEmpty(), is(true));
+        effectiveRoleDescriptors.set(null);
         assertEquals(Role.EMPTY, role);
         verify(reservedRolesStore).accept(anySetOf(String.class), any(ActionListener.class));
         verify(fileRolesStore).accept(anySetOf(String.class), any(ActionListener.class));
@@ -275,6 +302,12 @@ public class CompositeRolesStoreTests extends ESTestCase {
             future = new PlainActionFuture<>();
             compositeRolesStore.roles(names, future);
             future.actionGet();
+            if (getSuperuserRole && i == 0) {
+                assertThat(effectiveRoleDescriptors.get(), containsInAnyOrder(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR));
+                effectiveRoleDescriptors.set(null);
+            } else {
+                assertThat(effectiveRoleDescriptors.get(), is(nullValue()));
+            }
         }
 
         if (getSuperuserRole && numberOfTimesToCall > 0) {
@@ -301,15 +334,18 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final Settings settings = Settings.builder().put(SECURITY_ENABLED_SETTINGS)
             .put("xpack.security.authz.store.roles.negative_lookup_cache.max_size", 0)
             .build();
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(settings, fileRolesStore,
                 nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(settings),
-                new XPackLicenseState(settings), cache, mock(ApiKeyService.class));
+                new XPackLicenseState(settings), cache, mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor
 
         final String roleName = randomAlphaOfLengthBetween(1, 10);
         PlainActionFuture<Role> future = new PlainActionFuture<>();
         compositeRolesStore.roles(Collections.singleton(roleName), future);
         final Role role = future.actionGet();
+        assertThat(effectiveRoleDescriptors.get().isEmpty(), is(true));
+        effectiveRoleDescriptors.set(null);
         assertEquals(Role.EMPTY, role);
         verify(reservedRolesStore).accept(anySetOf(String.class), any(ActionListener.class));
         verify(fileRolesStore).accept(anySetOf(String.class), any(ActionListener.class));
@@ -334,16 +370,20 @@ public class CompositeRolesStoreTests extends ESTestCase {
         }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class));
         final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore());
 
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore,
                 nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(),
                 new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class));
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class),
+                rds -> effectiveRoleDescriptors.set(rds));
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor
 
         final String roleName = randomAlphaOfLengthBetween(1, 10);
         PlainActionFuture<Role> future = new PlainActionFuture<>();
         compositeRolesStore.roles(Collections.singleton(roleName), future);
         final Role role = future.actionGet();
+        assertThat(effectiveRoleDescriptors.get().isEmpty(), is(true));
+        effectiveRoleDescriptors.set(null);
         assertEquals(Role.EMPTY, role);
         verify(reservedRolesStore).accept(anySetOf(String.class), any(ActionListener.class));
         verify(fileRolesStore).accept(anySetOf(String.class), any(ActionListener.class));
@@ -357,6 +397,8 @@ public class CompositeRolesStoreTests extends ESTestCase {
             future = new PlainActionFuture<>();
             compositeRolesStore.roles(names, future);
             future.actionGet();
+            assertThat(effectiveRoleDescriptors.get().isEmpty(), is(true));
+            effectiveRoleDescriptors.set(null);
         }
 
         assertFalse(compositeRolesStore.isValueInNegativeLookupCache(roleName));
@@ -381,17 +423,22 @@ public class CompositeRolesStoreTests extends ESTestCase {
         }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class));
         final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore());
 
+        final RoleDescriptor roleAProvider1 = new RoleDescriptor("roleA", null,
+            new IndicesPrivileges[] {
+                IndicesPrivileges.builder().privileges("READ").indices("foo").grantedFields("*").build()
+            }, null);
         final InMemoryRolesProvider inMemoryProvider1 = spy(new InMemoryRolesProvider((roles) -> {
             Set<RoleDescriptor> descriptors = new HashSet<>();
             if (roles.contains("roleA")) {
-                descriptors.add(new RoleDescriptor("roleA", null,
-                    new IndicesPrivileges[] {
-                        IndicesPrivileges.builder().privileges("READ").indices("foo").grantedFields("*").build()
-                    }, null));
+                descriptors.add(roleAProvider1);
             }
             return RoleRetrievalResult.success(descriptors);
         }));
 
+        final RoleDescriptor roleBProvider2 = new RoleDescriptor("roleB", null,
+            new IndicesPrivileges[] {
+                IndicesPrivileges.builder().privileges("READ").indices("bar").grantedFields("*").build()
+            }, null);
        final InMemoryRolesProvider inMemoryProvider2 = spy(new InMemoryRolesProvider((roles) -> {
             Set<RoleDescriptor> descriptors = new HashSet<>();
             if (roles.contains("roleA")) {
@@ -403,24 +450,24 @@ public class CompositeRolesStoreTests extends ESTestCase {
                     }, null));
             }
             if (roles.contains("roleB")) {
-                descriptors.add(new RoleDescriptor("roleB", null,
-                    new IndicesPrivileges[] {
-                        IndicesPrivileges.builder().privileges("READ").indices("bar").grantedFields("*").build()
-                    }, null));
+                descriptors.add(roleBProvider2);
             }
             return RoleRetrievalResult.success(descriptors);
         }));
 
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         final CompositeRolesStore compositeRolesStore =
             new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore,
                 reservedRolesStore, mock(NativePrivilegeStore.class), Arrays.asList(inMemoryProvider1, inMemoryProvider2),
-                new ThreadContext(SECURITY_ENABLED_SETTINGS), new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache,
-                mock(ApiKeyService.class));
+                new ThreadContext(SECURITY_ENABLED_SETTINGS), new XPackLicenseState(SECURITY_ENABLED_SETTINGS),
+                cache, mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
 
         final Set<String> roleNames = Sets.newHashSet("roleA", "roleB", "unknown");
         PlainActionFuture<Role> future = new PlainActionFuture<>();
         compositeRolesStore.roles(roleNames, future);
         final Role role = future.actionGet();
+        assertThat(effectiveRoleDescriptors.get(), containsInAnyOrder(roleAProvider1, roleBProvider2));
+        effectiveRoleDescriptors.set(null);
 
         // make sure custom roles providers populate roles correctly
         assertEquals(2, role.indices().groups().length);
@@ -438,6 +485,12 @@ public class CompositeRolesStoreTests extends ESTestCase {
             future = new PlainActionFuture<>();
             compositeRolesStore.roles(Collections.singleton("unknown"), future);
             future.actionGet();
+            if (i == 0) {
+                assertThat(effectiveRoleDescriptors.get().isEmpty(), is(true));
+            } else {
+                assertThat(effectiveRoleDescriptors.get(), is(nullValue()));
+            }
+            effectiveRoleDescriptors.set(null);
         }
 
         verifyNoMoreInteractions(inMemoryProvider1, inMemoryProvider2);
@@ -616,11 +669,12 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>> failingProvider =
             (roles, listener) -> listener.onFailure(new Exception("fake failure"));
 
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         final CompositeRolesStore compositeRolesStore =
             new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 mock(NativePrivilegeStore.class), Arrays.asList(inMemoryProvider1, failingProvider),
-                new ThreadContext(SECURITY_ENABLED_SETTINGS), new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache,
-                mock(ApiKeyService.class));
+                new ThreadContext(SECURITY_ENABLED_SETTINGS), new XPackLicenseState(SECURITY_ENABLED_SETTINGS),
+                cache, mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
 
         final Set<String> roleNames = Sets.newHashSet("roleA", "roleB", "unknown");
         PlainActionFuture<Role> future = new PlainActionFuture<>();
@@ -630,6 +684,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
             fail("provider should have thrown a failure");
         } catch (ExecutionException e) {
             assertEquals("fake failure", e.getCause().getMessage());
+            assertThat(effectiveRoleDescriptors.get(), is(nullValue()));
         }
     }
 
@@ -646,13 +701,14 @@ public class CompositeRolesStoreTests extends ESTestCase {
         }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class));
         final ReservedRolesStore reservedRolesStore = new ReservedRolesStore();
 
+        final RoleDescriptor roleA = new RoleDescriptor("roleA", null,
+            new IndicesPrivileges[] {
+                IndicesPrivileges.builder().privileges("READ").indices("foo").grantedFields("*").build()
+            }, null);
         final InMemoryRolesProvider inMemoryProvider = new InMemoryRolesProvider((roles) -> {
             Set<RoleDescriptor> descriptors = new HashSet<>();
             if (roles.contains("roleA")) {
-                descriptors.add(new RoleDescriptor("roleA", null,
-                    new IndicesPrivileges[] {
-                        IndicesPrivileges.builder().privileges("READ").indices("foo").grantedFields("*").build()
-                    }, null));
+                descriptors.add(roleA);
             }
             return RoleRetrievalResult.success(descriptors);
         });
@@ -660,27 +716,34 @@ public class CompositeRolesStoreTests extends ESTestCase {
         UpdatableLicenseState xPackLicenseState = new UpdatableLicenseState(SECURITY_ENABLED_SETTINGS);
         // these licenses don't allow custom role providers
         xPackLicenseState.update(randomFrom(OperationMode.BASIC, OperationMode.GOLD, OperationMode.STANDARD), true, null);
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         CompositeRolesStore compositeRolesStore = new CompositeRolesStore(
             Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class),
-            Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState, cache, mock(ApiKeyService.class));
+            Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState, cache,
+            mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
 
         Set<String> roleNames = Sets.newHashSet("roleA");
         PlainActionFuture<Role> future = new PlainActionFuture<>();
         compositeRolesStore.roles(roleNames, future);
         Role role = future.actionGet();
+        assertThat(effectiveRoleDescriptors.get().isEmpty(), is(true));
+        effectiveRoleDescriptors.set(null);
 
         // no roles should've been populated, as the license doesn't permit custom role providers
         assertEquals(0, role.indices().groups().length);
 
         compositeRolesStore = new CompositeRolesStore(
             Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class),
-            Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState, cache, mock(ApiKeyService.class));
+            Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState, cache,
+            mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
         // these licenses allow custom role providers
         xPackLicenseState.update(randomFrom(OperationMode.PLATINUM, OperationMode.TRIAL), true, null);
         roleNames = Sets.newHashSet("roleA");
         future = new PlainActionFuture<>();
         compositeRolesStore.roles(roleNames, future);
         role = future.actionGet();
+        assertThat(effectiveRoleDescriptors.get(), containsInAnyOrder(roleA));
+        effectiveRoleDescriptors.set(null);
 
         // roleA should've been populated by the custom role provider, because the license allows it
         assertEquals(1, role.indices().groups().length);
 
@@ -688,13 +751,15 @@ public class CompositeRolesStoreTests extends ESTestCase {
         // license expired, don't allow custom role providers
         compositeRolesStore = new CompositeRolesStore(
             Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class),
-            Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState, cache, mock(ApiKeyService.class));
+            Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState, cache,
+            mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
         xPackLicenseState.update(randomFrom(OperationMode.PLATINUM, OperationMode.TRIAL), false, null);
         roleNames = Sets.newHashSet("roleA");
         future = new PlainActionFuture<>();
         compositeRolesStore.roles(roleNames, future);
         role = future.actionGet();
         assertEquals(0, role.indices().groups().length);
+        assertThat(effectiveRoleDescriptors.get().isEmpty(), is(true));
     }
 
     private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) {
@@ -713,7 +778,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         CompositeRolesStore compositeRolesStore = new CompositeRolesStore(
                 Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(Settings.EMPTY),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class)) {
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), rds -> {}) {
             @Override
             public void invalidateAll() {
                 numInvalidation.incrementAndGet();
@@ -765,7 +830,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore,
                 nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(),
                 new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class)) {
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), rds -> {}) {
             @Override
             public void invalidateAll() {
                 numInvalidation.incrementAndGet();
@@ -799,10 +864,9 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore,
                 nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(),
                 new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class));
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), rds -> {});
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor
 
-        PlainActionFuture<Role> rolesFuture = new PlainActionFuture<>();
         final User user = new User("no role user");
         Authentication auth = new Authentication(user, new RealmRef("name", "type", "node"), null);
@@ -839,7 +903,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(settings, fileRolesStore,
                 nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(settings),
-                new XPackLicenseState(settings), cache, mock(ApiKeyService.class));
+                new XPackLicenseState(settings), cache, mock(ApiKeyService.class), rds -> {});
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor
 
         PlainActionFuture<Role> rolesFuture = new PlainActionFuture<>();
@@ -863,10 +927,12 @@ public class CompositeRolesStoreTests extends ESTestCase {
         }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class));
         final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore());
 
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore,
                 nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(),
                 new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class));
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class),
+                rds -> effectiveRoleDescriptors.set(rds));
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor
 
         PlainActionFuture<Role> rolesFuture = new PlainActionFuture<>();
@@ -874,6 +940,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         compositeRolesStore.getRoles(XPackUser.INSTANCE, auth, rolesFuture);
         final Role roles = rolesFuture.actionGet();
         assertThat(roles, equalTo(XPackUser.ROLE));
+        assertThat(effectiveRoleDescriptors.get(), is(nullValue()));
         verifyNoMoreInteractions(fileRolesStore, nativeRolesStore, reservedRolesStore);
     }
 
@@ -890,13 +957,16 @@ public class CompositeRolesStoreTests extends ESTestCase {
         }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class));
         final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore());
 
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore,
                 nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(),
                 new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class));
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class),
+                rds -> effectiveRoleDescriptors.set(rds));
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor
         IllegalArgumentException iae = expectThrows(IllegalArgumentException.class,
             () -> compositeRolesStore.getRoles(SystemUser.INSTANCE, null, null));
+        assertThat(effectiveRoleDescriptors.get(), is(nullValue()));
         assertEquals("the user [_system] is the system user and we should never try to get its roles", iae.getMessage());
     }
 
@@ -921,10 +991,13 @@ public class CompositeRolesStoreTests extends ESTestCase {
             listener.onResponse(Collections.emptyList());
             return Void.TYPE;
         }).when(nativePrivStore).getPrivileges(any(Collection.class), any(Collection.class), any(ActionListener.class));
+
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore,
                 nativeRolesStore, reservedRolesStore, nativePrivStore, Collections.emptyList(),
                 new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, apiKeyService);
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, apiKeyService,
+                rds -> effectiveRoleDescriptors.set(rds));
         AuditUtil.getOrGenerateRequestId(threadContext);
         final Authentication authentication = new Authentication(new User("test api key user", "superuser"),
             new RealmRef("_es_api_key", "_es_api_key", "node"), null, Version.CURRENT, AuthenticationType.API_KEY, Collections.emptyMap());
@@ -938,7 +1011,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         PlainActionFuture<Role> roleFuture = new PlainActionFuture<>();
         compositeRolesStore.getRoles(authentication.getUser(), authentication, roleFuture);
         roleFuture.actionGet();
-
+        assertThat(effectiveRoleDescriptors.get(), is(nullValue()));
         verify(apiKeyService).getRoleForApiKey(eq(authentication), any(ActionListener.class));
     }
 
@@ -963,10 +1036,13 @@ public class CompositeRolesStoreTests extends ESTestCase {
             listener.onResponse(Collections.emptyList());
             return Void.TYPE;
         }).when(nativePrivStore).getPrivileges(any(Collection.class), any(Collection.class), any(ActionListener.class));
+
+        final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore,
                 nativeRolesStore, reservedRolesStore, nativePrivStore, Collections.emptyList(),
                 new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, apiKeyService);
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, apiKeyService,
+                rds -> effectiveRoleDescriptors.set(rds));
         AuditUtil.getOrGenerateRequestId(threadContext);
         final Authentication authentication = new Authentication(new User("test api key user", "api_key"),
             new RealmRef("_es_api_key", "_es_api_key", "node"), null, Version.CURRENT, AuthenticationType.API_KEY, Collections.emptyMap());
@@ -983,6 +1059,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         compositeRolesStore.getRoles(authentication.getUser(), authentication, roleFuture);
         Role role = roleFuture.actionGet();
         assertThat(role.checkClusterAction("cluster:admin/foo", Empty.INSTANCE), is(false));
+        assertThat(effectiveRoleDescriptors.get(), is(nullValue()));
         verify(apiKeyService).getRoleForApiKey(eq(authentication), any(ActionListener.class));
     }
 
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/DeprecationRoleDescriptorConsumerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/DeprecationRoleDescriptorConsumerTests.java
new file mode 100644
index 00000000000..425370feed3
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/DeprecationRoleDescriptorConsumerTests.java
@@ -0,0 +1,304 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.security.authz.store; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.mock.orig.Mockito; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.junit.Before; +import org.mockito.stubbing.Answer; + +import java.util.Arrays; +import java.util.concurrent.ExecutorService; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +public final class DeprecationRoleDescriptorConsumerTests extends ESTestCase { + + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + this.threadPool = mock(ThreadPool.class); + ExecutorService executorService = mock(ExecutorService.class); + Mockito.doAnswer((Answer) invocation -> { + final Runnable arg0 = (Runnable) invocation.getArguments()[0]; + arg0.run(); + return null; + }).when(executorService).execute(Mockito.isA(Runnable.class)); + when(threadPool.generic()).thenReturn(executorService); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + } + + public void testSimpleAliasAndIndexPair() throws Exception { + final DeprecationLogger deprecationLogger = mock(DeprecationLogger.class); + final MetaData.Builder metaDataBuilder = MetaData.builder(); + addIndex(metaDataBuilder, "index", "alias"); + final RoleDescriptor roleOverAlias = new RoleDescriptor("roleOverAlias", new String[] { "read" }, + new RoleDescriptor.IndicesPrivileges[] { indexPrivileges(randomFrom("read", "write", "delete", "index"), "alias") }, null); + final RoleDescriptor roleOverIndex = new RoleDescriptor("roleOverIndex", new String[] { "manage" }, + new RoleDescriptor.IndicesPrivileges[] { indexPrivileges(randomFrom("read", "write", "delete", "index"), "index") }, null); + DeprecationRoleDescriptorConsumer deprecationConsumer = new DeprecationRoleDescriptorConsumer( + mockClusterService(metaDataBuilder.build()), threadPool, deprecationLogger); + deprecationConsumer.accept(Arrays.asList(roleOverAlias, roleOverIndex)); + verifyLogger(deprecationLogger, "roleOverAlias", "alias", "index"); + verifyNoMoreInteractions(deprecationLogger); + } + + public void testRoleGrantsOnIndexAndAliasPair() throws Exception { + final DeprecationLogger deprecationLogger = mock(DeprecationLogger.class); + final MetaData.Builder metaDataBuilder = MetaData.builder(); + addIndex(metaDataBuilder, "index", "alias"); + addIndex(metaDataBuilder, "index1", "alias2"); + final RoleDescriptor roleOverIndexAndAlias = new RoleDescriptor("roleOverIndexAndAlias", new String[] { "manage_watcher" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges(randomFrom("read", "write", "delete", "index"), "index", "alias") }, + null); + DeprecationRoleDescriptorConsumer deprecationConsumer = new DeprecationRoleDescriptorConsumer( + 
mockClusterService(metaDataBuilder.build()), threadPool, deprecationLogger); + deprecationConsumer.accept(Arrays.asList(roleOverIndexAndAlias)); + verifyNoMoreInteractions(deprecationLogger); + } + + public void testMultiplePrivilegesLoggedOnce() throws Exception { + final DeprecationLogger deprecationLogger = mock(DeprecationLogger.class); + final MetaData.Builder metaDataBuilder = MetaData.builder(); + addIndex(metaDataBuilder, "index", "alias"); + addIndex(metaDataBuilder, "index2", "alias2"); + final RoleDescriptor roleOverAlias = new RoleDescriptor("roleOverAlias", new String[] { "manage_watcher" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("write", "alias"), + indexPrivileges("manage_ilm", "alias") }, + null); + DeprecationRoleDescriptorConsumer deprecationConsumer = new DeprecationRoleDescriptorConsumer( + mockClusterService(metaDataBuilder.build()), threadPool, deprecationLogger); + deprecationConsumer.accept(Arrays.asList(roleOverAlias)); + verifyLogger(deprecationLogger, "roleOverAlias", "alias", "index"); + verifyNoMoreInteractions(deprecationLogger); + } + + public void testMultiplePrivilegesLoggedForEachAlias() throws Exception { + final DeprecationLogger deprecationLogger = mock(DeprecationLogger.class); + final MetaData.Builder metaDataBuilder = MetaData.builder(); + addIndex(metaDataBuilder, "index", "alias", "alias3"); + addIndex(metaDataBuilder, "index2", "alias2", "alias", "alias4"); + addIndex(metaDataBuilder, "index3", "alias3", "alias"); + addIndex(metaDataBuilder, "index4", "alias4", "alias"); + addIndex(metaDataBuilder, "foo", "bar"); + final RoleDescriptor roleMultiplePrivileges = new RoleDescriptor("roleMultiplePrivileges", new String[] { "manage_watcher" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("write", "index2", "alias"), + indexPrivileges("read", "alias4"), + indexPrivileges("delete_index", "alias3", "index"), + indexPrivileges("create_index", "alias3", "index3")}, + null); + DeprecationRoleDescriptorConsumer deprecationConsumer = new DeprecationRoleDescriptorConsumer( + mockClusterService(metaDataBuilder.build()), threadPool, deprecationLogger); + deprecationConsumer.accept(Arrays.asList(roleMultiplePrivileges)); + verifyLogger(deprecationLogger, "roleMultiplePrivileges", "alias", "index, index3, index4"); + verifyLogger(deprecationLogger, "roleMultiplePrivileges", "alias4", "index2, index4"); + verifyNoMoreInteractions(deprecationLogger); + } + + public void testPermissionsOverlapping() throws Exception { + final DeprecationLogger deprecationLogger = mock(DeprecationLogger.class); + final MetaData.Builder metaDataBuilder = MetaData.builder(); + addIndex(metaDataBuilder, "index1", "alias1", "bar"); + addIndex(metaDataBuilder, "index2", "alias2", "baz"); + addIndex(metaDataBuilder, "foo", "bar"); + final RoleDescriptor roleOverAliasAndIndex = new RoleDescriptor("roleOverAliasAndIndex", new String[] { "read_ilm" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("monitor", "index2", "alias1"), + indexPrivileges("monitor", "index1", "alias2")}, + null); + DeprecationRoleDescriptorConsumer deprecationConsumer = new DeprecationRoleDescriptorConsumer( + mockClusterService(metaDataBuilder.build()), threadPool, deprecationLogger); + deprecationConsumer.accept(Arrays.asList(roleOverAliasAndIndex)); + verifyNoMoreInteractions(deprecationLogger); + } + + public void testOverlappingAcrossMultipleRoleDescriptors() throws Exception { + final DeprecationLogger deprecationLogger = mock(DeprecationLogger.class); + final 
MetaData.Builder metaDataBuilder = MetaData.builder(); + addIndex(metaDataBuilder, "index1", "alias1", "bar"); + addIndex(metaDataBuilder, "index2", "alias2", "baz"); + addIndex(metaDataBuilder, "foo", "bar"); + final RoleDescriptor role1 = new RoleDescriptor("role1", new String[] { "monitor_watcher" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("monitor", "index2", "alias1")}, + null); + final RoleDescriptor role2 = new RoleDescriptor("role2", new String[] { "read_ccr" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("monitor", "index1", "alias2")}, + null); + final RoleDescriptor role3 = new RoleDescriptor("role3", new String[] { "monitor_ml" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("index", "bar")}, + null); + DeprecationRoleDescriptorConsumer deprecationConsumer = new DeprecationRoleDescriptorConsumer( + mockClusterService(metaDataBuilder.build()), threadPool, deprecationLogger); + deprecationConsumer.accept(Arrays.asList(role1, role2, role3)); + verifyLogger(deprecationLogger, "role1", "alias1", "index1"); + verifyLogger(deprecationLogger, "role2", "alias2", "index2"); + verifyLogger(deprecationLogger, "role3", "bar", "foo, index1"); + verifyNoMoreInteractions(deprecationLogger); + } + + public void testDailyRoleCaching() throws Exception { + final DeprecationLogger deprecationLogger = mock(DeprecationLogger.class); + final MetaData.Builder metaDataBuilder = MetaData.builder(); + addIndex(metaDataBuilder, "index1", "alias1", "far"); + addIndex(metaDataBuilder, "index2", "alias2", "baz"); + addIndex(metaDataBuilder, "foo", "bar"); + final MetaData metaData = metaDataBuilder.build(); + RoleDescriptor someRole = new RoleDescriptor("someRole", new String[] { "monitor_rollup" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("monitor", "i*", "bar")}, + null); + final DeprecationRoleDescriptorConsumer deprecationConsumer = new DeprecationRoleDescriptorConsumer(mockClusterService(metaData), + threadPool, deprecationLogger); + final String cacheKeyBefore = DeprecationRoleDescriptorConsumer.buildCacheKey(someRole); + deprecationConsumer.accept(Arrays.asList(someRole)); + verifyLogger(deprecationLogger, "someRole", "bar", "foo"); + verifyNoMoreInteractions(deprecationLogger); + deprecationConsumer.accept(Arrays.asList(someRole)); + final String cacheKeyAfter = DeprecationRoleDescriptorConsumer.buildCacheKey(someRole); + // we don't do this test if it crosses days + if (false == cacheKeyBefore.equals(cacheKeyAfter)) { + return; + } + verifyNoMoreInteractions(deprecationLogger); + RoleDescriptor differentRoleSameName = new RoleDescriptor("someRole", new String[] { "manage_pipeline" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("write", "i*", "baz")}, + null); + deprecationConsumer.accept(Arrays.asList(differentRoleSameName)); + final String cacheKeyAfterParty = DeprecationRoleDescriptorConsumer.buildCacheKey(differentRoleSameName); + // we don't do this test if it crosses days + if (false == cacheKeyBefore.equals(cacheKeyAfterParty)) { + return; + } + verifyNoMoreInteractions(deprecationLogger); + } + + public void testWildcards() throws Exception { + final DeprecationLogger deprecationLogger = mock(DeprecationLogger.class); + final MetaData.Builder metaDataBuilder = MetaData.builder(); + addIndex(metaDataBuilder, "index", "alias", "alias3"); + addIndex(metaDataBuilder, "index2", "alias", "alias2", "alias4"); + addIndex(metaDataBuilder, "index3", "alias", "alias3"); + addIndex(metaDataBuilder, "index4", 
"alias", "alias4"); + addIndex(metaDataBuilder, "foo", "bar", "baz"); + MetaData metaData = metaDataBuilder.build(); + final RoleDescriptor roleGlobalWildcard = new RoleDescriptor("roleGlobalWildcard", new String[] { "manage_token" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges(randomFrom("write", "delete_index", "read_cross_cluster"), "*")}, + null); + new DeprecationRoleDescriptorConsumer(mockClusterService(metaData), threadPool, deprecationLogger) + .accept(Arrays.asList(roleGlobalWildcard)); + verifyNoMoreInteractions(deprecationLogger); + final RoleDescriptor roleGlobalWildcard2 = new RoleDescriptor("roleGlobalWildcard2", new String[] { "manage_index_templates" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges(randomFrom("write", "delete_index", "read_cross_cluster"), "i*", "a*")}, + null); + new DeprecationRoleDescriptorConsumer(mockClusterService(metaData), threadPool, deprecationLogger) + .accept(Arrays.asList(roleGlobalWildcard2)); + verifyNoMoreInteractions(deprecationLogger); + final RoleDescriptor roleWildcardOnIndices = new RoleDescriptor("roleWildcardOnIndices", new String[] { "manage_watcher" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("write", "index*", "alias", "alias3"), + indexPrivileges("read", "foo")}, + null); + new DeprecationRoleDescriptorConsumer(mockClusterService(metaData), threadPool, deprecationLogger) + .accept(Arrays.asList(roleWildcardOnIndices)); + verifyNoMoreInteractions(deprecationLogger); + final RoleDescriptor roleWildcardOnAliases = new RoleDescriptor("roleWildcardOnAliases", new String[] { "manage_watcher" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("write", "alias*", "index", "index3"), + indexPrivileges("read", "foo", "index2")}, + null); + new DeprecationRoleDescriptorConsumer(mockClusterService(metaData), threadPool, deprecationLogger) + .accept(Arrays.asList(roleWildcardOnAliases)); + verifyLogger(deprecationLogger, "roleWildcardOnAliases", "alias", "index2, index4"); + verifyLogger(deprecationLogger, "roleWildcardOnAliases", "alias2", "index2"); + verifyLogger(deprecationLogger, "roleWildcardOnAliases", "alias4", "index2, index4"); + verifyNoMoreInteractions(deprecationLogger); + } + + public void testMultipleIndicesSameAlias() throws Exception { + final DeprecationLogger deprecationLogger = mock(DeprecationLogger.class); + final MetaData.Builder metaDataBuilder = MetaData.builder(); + addIndex(metaDataBuilder, "index1", "alias1"); + addIndex(metaDataBuilder, "index2", "alias1", "alias2"); + addIndex(metaDataBuilder, "index3", "alias2"); + final RoleDescriptor roleOverAliasAndIndex = new RoleDescriptor("roleOverAliasAndIndex", new String[] { "manage_ml" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("delete_index", "alias1", "index1") }, + null); + DeprecationRoleDescriptorConsumer deprecationConsumer = new DeprecationRoleDescriptorConsumer( + mockClusterService(metaDataBuilder.build()), threadPool, deprecationLogger); + deprecationConsumer.accept(Arrays.asList(roleOverAliasAndIndex)); + verifyLogger(deprecationLogger, "roleOverAliasAndIndex", "alias1", "index2"); + verifyNoMoreInteractions(deprecationLogger); + final RoleDescriptor roleOverAliases = new RoleDescriptor("roleOverAliases", new String[] { "manage_security" }, + new RoleDescriptor.IndicesPrivileges[] { + indexPrivileges("monitor", "alias1", "alias2") }, + null); + deprecationConsumer.accept(Arrays.asList(roleOverAliases)); + verifyLogger(deprecationLogger, "roleOverAliases", "alias1", "index1, 
index2"); + verifyLogger(deprecationLogger, "roleOverAliases", "alias2", "index2, index3"); + verifyNoMoreInteractions(deprecationLogger); + } + + private void addIndex(MetaData.Builder metaDataBuilder, String index, String... aliases) { + final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(index) + .settings(Settings.builder().put("index.version.created", VersionUtils.randomVersion(random()))) + .numberOfShards(1) + .numberOfReplicas(1); + for (final String alias : aliases) { + indexMetaDataBuilder.putAlias(AliasMetaData.builder(alias).build()); + } + metaDataBuilder.put(indexMetaDataBuilder.build(), false); + } + + private ClusterService mockClusterService(MetaData metaData) { + final ClusterService clusterService = mock(ClusterService.class); + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + when(clusterService.state()).thenReturn(clusterState); + return clusterService; + } + + private RoleDescriptor.IndicesPrivileges indexPrivileges(String priv, String... indicesOrAliases) { + return RoleDescriptor.IndicesPrivileges.builder() + .indices(indicesOrAliases) + .privileges(priv) + .grantedFields(randomArray(0, 2, String[]::new, () -> randomBoolean() ? null : randomAlphaOfLengthBetween(1, 4))) + .query(randomBoolean() ? null : "{ }") + .build(); + } + + private void verifyLogger(DeprecationLogger deprecationLogger, String roleName, String aliasName, String indexNames) { + verify(deprecationLogger).deprecated("Role [" + roleName + "] contains index privileges covering the [" + aliasName + + "] alias but which do not cover some of the indices that it points to [" + indexNames + "]. Granting privileges over an" + + " alias and hence granting privileges over all the indices that the alias points to is deprecated and will be removed" + + " in a future version of Elasticsearch. 
Instead define permissions exclusively on index names or index name patterns."); + } +} From 722362e402f991b3b813980fb4f36bd6c9b6b2ac Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Wed, 10 Apr 2019 13:01:14 +0100 Subject: [PATCH 31/60] Mute MachineLearningIt#testDeleteExpiredData Tracked in #41070 --- .../test/java/org/elasticsearch/client/MachineLearningIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index f7b7b148f66..3fdc00419cd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -915,6 +915,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { return forecastJobResponse.getForecastId(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41070") public void testDeleteExpiredData() throws Exception { String jobId = "test-delete-expired-data"; From 799541e0681ef9b16ec6a9a4219b2ea573750abd Mon Sep 17 00:00:00 2001 From: Dimitrios Liappis Date: Wed, 10 Apr 2019 16:37:16 +0300 Subject: [PATCH 32/60] Mute DateTimeUnitTests.testConversion (#40738) Due to #39617 Backport of #40086 --- .../org/elasticsearch/common/rounding/DateTimeUnitTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java b/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java index f188eb4cac6..53f48becd30 100644 --- a/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java +++ b/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java @@ -66,6 +66,7 @@ public class DateTimeUnitTests extends ESTestCase { assertEquals(SECOND_OF_MINUTE, DateTimeUnit.resolve((byte) 8)); } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/39617") public void testConversion() { long millis = randomLongBetween(0, Instant.now().toEpochMilli()); DateTimeZone zone = randomDateTimeZone(); From f5014ace64088b3e76cfeb6c06161af1c8fce2e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Wed, 10 Apr 2019 15:43:35 +0200 Subject: [PATCH 33/60] [ML] Add validation that rejects duplicate detectors in PutJobAction (#40967) (#41072) * [ML] Add validation that rejects duplicate detectors in PutJobAction Closes #39704 * Add YML integration test for duplicate detectors fix. * Use "== false" comparison rather than "!" operator. * Refine error message to sound more natural. * Put job description in square brackets in the error message. * Use the new validation in ValidateJobConfigAction. * Exclude YML tests for new validation from permission tests. 
--- .../xpack/core/ml/action/PutJobAction.java | 4 ++ .../ml/action/ValidateJobConfigAction.java | 13 +++++-- .../xpack/core/ml/job/config/Detector.java | 30 +++++++++----- .../xpack/core/ml/job/config/Job.java | 15 +++++++ .../xpack/core/ml/job/messages/Messages.java | 2 + .../xpack/core/ml/job/config/JobTests.java | 14 +++++++ .../ml/qa/ml-with-security/build.gradle | 2 + .../rest-api-spec/test/ml/jobs_crud.yml | 39 +++++++++++++++++++ .../rest-api-spec/test/ml/validate.yml | 37 ++++++++++++++++++ 9 files changed, 142 insertions(+), 14 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java index f241e4bd375..dc3983644f7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java @@ -59,6 +59,10 @@ public class PutJobAction extends Action { public Request(Job.Builder jobBuilder) { // Validate the jobBuilder immediately so that errors can be detected prior to transportation. jobBuilder.validateInputFields(); + // Validate that detector configs are unique. + // This validation logically belongs to validateInputFields call but we perform it only for PUT action to avoid BWC issues which + // would occur when parsing an old job config that already had duplicate detectors. + jobBuilder.validateDetectorsAreUnique(); // Some fields cannot be set at create time List invalidJobCreationSettings = jobBuilder.invalidCreateTimeSettings(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java index 8854aa38c8f..99af65f4412 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java @@ -49,19 +49,24 @@ public class ValidateJobConfigAction extends Action { private Job job; public static Request parseRequest(XContentParser parser) { - Job.Builder job = Job.STRICT_PARSER.apply(parser, null); + Job.Builder jobBuilder = Job.STRICT_PARSER.apply(parser, null); // When jobs are PUT their ID must be supplied in the URL - assume this will // be valid unless an invalid job ID is specified in the JSON to be validated - job.setId(job.getId() != null ? job.getId() : "ok"); + jobBuilder.setId(jobBuilder.getId() != null ? jobBuilder.getId() : "ok"); + + // Validate that detector configs are unique. + // This validation logically belongs to validateInputFields call but we perform it only for PUT action to avoid BWC issues which + // would occur when parsing an old job config that already had duplicate detectors. 
+ jobBuilder.validateDetectorsAreUnique(); // Some fields cannot be set at create time - List invalidJobCreationSettings = job.invalidCreateTimeSettings(); + List invalidJobCreationSettings = jobBuilder.invalidCreateTimeSettings(); if (invalidJobCreationSettings.isEmpty() == false) { throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_INVALID_CREATE_SETTINGS, String.join(",", invalidJobCreationSettings))); } - return new Request(job.build(new Date())); + return new Request(jobBuilder.build(new Date())); } public Request() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index d9687a5a9e5..b27149ef412 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -489,44 +489,54 @@ public class Detector implements ToXContentObject, Writeable { this.fieldName = fieldName; } - public void setDetectorDescription(String detectorDescription) { + public Builder setDetectorDescription(String detectorDescription) { this.detectorDescription = detectorDescription; + return this; } - public void setFunction(String function) { + public Builder setFunction(String function) { this.function = DetectorFunction.fromString(function); + return this; } - public void setFieldName(String fieldName) { + public Builder setFieldName(String fieldName) { this.fieldName = fieldName; + return this; } - public void setByFieldName(String byFieldName) { + public Builder setByFieldName(String byFieldName) { this.byFieldName = byFieldName; + return this; } - public void setOverFieldName(String overFieldName) { + public Builder setOverFieldName(String overFieldName) { this.overFieldName = overFieldName; + return this; } - public void setPartitionFieldName(String partitionFieldName) { + public Builder setPartitionFieldName(String partitionFieldName) { this.partitionFieldName = partitionFieldName; + return this; } - public void setUseNull(boolean useNull) { + public Builder setUseNull(boolean useNull) { this.useNull = useNull; + return this; } - public void setExcludeFrequent(ExcludeFrequent excludeFrequent) { + public Builder setExcludeFrequent(ExcludeFrequent excludeFrequent) { this.excludeFrequent = excludeFrequent; + return this; } - public void setRules(List rules) { + public Builder setRules(List rules) { this.rules = rules; + return this; } - public void setDetectorIndex(int detectorIndex) { + public Builder setDetectorIndex(int detectorIndex) { this.detectorIndex = detectorIndex; + return this; } public Detector build() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index e6be2df0aed..de37702fe52 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -1067,6 +1067,21 @@ public class Job extends AbstractDiffable implements Writeable, ToXContentO } } + /** + * Validates that the Detector configs are unique up to detectorIndex field (which is ignored). 
+ */ + public void validateDetectorsAreUnique() { + Set canonicalDetectors = new HashSet<>(); + for (Detector detector : this.analysisConfig.getDetectors()) { + // While testing for equality, ignore detectorIndex field as this field is auto-generated. + Detector canonicalDetector = new Detector.Builder(detector).setDetectorIndex(0).build(); + if (canonicalDetectors.add(canonicalDetector) == false) { + throw new IllegalArgumentException( + Messages.getMessage(Messages.JOB_CONFIG_DUPLICATE_DETECTORS_DISALLOWED, detector.getDetectorDescription())); + } + } + } + /** * Builds a job with the given {@code createTime} and the current version. * This should be used when a new job is created as opposed to {@link #build()}. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 80542909efd..42ce9eb4273 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -147,6 +147,8 @@ public final class Messages { public static final String JOB_CONFIG_UPDATE_ANALYSIS_LIMITS_MODEL_MEMORY_LIMIT_CANNOT_BE_DECREASED = "Invalid update value for analysis_limits: model_memory_limit cannot be decreased below current usage; " + "current usage [{0}], update had [{1}]"; + public static final String JOB_CONFIG_DUPLICATE_DETECTORS_DISALLOWED = + "Duplicate detectors are not allowed: [{0}]"; public static final String JOB_CONFIG_DETECTOR_DUPLICATE_FIELD_NAME = "{0} and {1} cannot be the same: ''{2}''"; public static final String JOB_CONFIG_DETECTOR_COUNT_DISALLOWED = diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index fc93f5c02a9..47e96f8b5ad 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -513,6 +513,20 @@ public class JobTests extends AbstractSerializingTestCase { assertEquals(e.getMessage(), "job and group names must be unique but job [foo] and group [foo] have the same name"); } + public void testInvalidAnalysisConfig_duplicateDetectors() throws Exception { + Job.Builder builder = + new Job.Builder("job_with_duplicate_detectors") + .setCreateTime(new Date()) + .setDataDescription(new DataDescription.Builder()) + .setAnalysisConfig(new AnalysisConfig.Builder(Arrays.asList( + new Detector.Builder("mean", "responsetime").build(), + new Detector.Builder("mean", "responsetime").build() + ))); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::validateDetectorsAreUnique); + assertThat(e.getMessage(), containsString("Duplicate detectors are not allowed: [mean(responsetime)]")); + } + public void testEarliestValidTimestamp_GivenEmptyDataCounts() { assertThat(createRandomizedJob().earliestValidTimestamp(new DataCounts("foo")), equalTo(0L)); } diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index 43421b4591f..bc0b0ca5b7b 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -57,6 +57,7 @@ integTestRunner { 'ml/jobs_crud/Test put job after closing state index', 
'ml/jobs_crud/Test put job with inconsistent body/param ids', 'ml/jobs_crud/Test put job with time field in analysis_config', + 'ml/jobs_crud/Test put job with duplicate detector configurations', 'ml/jobs_crud/Test job with categorization_analyzer and categorization_filters', 'ml/jobs_get/Test get job given missing job_id', 'ml/jobs_get_result_buckets/Test mutually-exclusive params', @@ -91,6 +92,7 @@ integTestRunner { 'ml/validate/Test invalid job config', 'ml/validate/Test job config is invalid because model snapshot id set', 'ml/validate/Test job config that is invalid only because of the job ID', + 'ml/validate/Test job config with duplicate detector configurations', 'ml/validate_detector/Test invalid detector', 'ml/delete_forecast/Test delete on _all forecasts not allow no forecasts', 'ml/delete_forecast/Test delete forecast on missing forecast', diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml index 800a536d57e..eb6186c3a64 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -1023,6 +1023,45 @@ "data_description" : {} } +--- +"Test put job with duplicate detector configurations": + + - do: + catch: /illegal_argument_exception.*Duplicate detectors are not allowed/ + ml.put_job: + job_id: jobs-crud-duplicate-detectors + body: > + { + "analysis_config": { + "bucket_span": "1h", + "detectors": [ + {"function":"max", "field_name":"responsetime"}, + {"function":"max", "field_name":"responsetime"} + ] + }, + "data_description": { + "time_field": "@timestamp" + } + } + + - do: + catch: /illegal_argument_exception.*Duplicate detectors are not allowed/ + ml.put_job: + job_id: jobs-crud-duplicate-detectors-with-explicit-indices + body: > + { + "analysis_config": { + "bucket_span": "1h", + "detectors": [ + {"function":"max", "field_name":"responsetime", "detector_index": 0}, + {"function":"max", "field_name":"responsetime", "detector_index": 1} + ] + }, + "data_description": { + "time_field": "@timestamp" + } + } + --- "Test put job after closing results index": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/validate.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/validate.yml index c3f9bcc8770..ae1db3552e4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/validate.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/validate.yml @@ -106,3 +106,40 @@ "data_description" : { } } + +--- +"Test job config with duplicate detector configurations": + + - do: + catch: /illegal_argument_exception.*Duplicate detectors are not allowed/ + ml.validate: + body: > + { + "analysis_config": { + "bucket_span": "1h", + "detectors": [ + {"function":"max", "field_name":"responsetime"}, + {"function":"max", "field_name":"responsetime"} + ] + }, + "data_description": { + "time_field": "@timestamp" + } + } + + - do: + catch: /illegal_argument_exception.*Duplicate detectors are not allowed/ + ml.validate: + body: > + { + "analysis_config": { + "bucket_span": "1h", + "detectors": [ + {"function":"max", "field_name":"responsetime", "detector_index": 0}, + {"function":"max", "field_name":"responsetime", "detector_index": 1} + ] + }, + "data_description": { + "time_field": "@timestamp" + } + } From 9aa8ab58ee65989df54fb914b9ae3c5935aba3bf Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 10 Apr 2019 01:38:57 -0400 Subject: [PATCH 34/60] Add 7.0.0 
Release Notes (#41029) --- docs/reference/release-notes/7.0.0.asciidoc | 1639 ++++++++++++++++++- 1 file changed, 1635 insertions(+), 4 deletions(-) diff --git a/docs/reference/release-notes/7.0.0.asciidoc b/docs/reference/release-notes/7.0.0.asciidoc index fffd0f7467c..751c824207d 100644 --- a/docs/reference/release-notes/7.0.0.asciidoc +++ b/docs/reference/release-notes/7.0.0.asciidoc @@ -1,10 +1,1641 @@ [[release-notes-7.0.0]] == {es} version 7.0.0 -coming[7.0.0] +These release notes include all changes made in the alpha, beta, and RC +releases of 7.0.0. -//These release notes include all changes made in the alpha, beta, and RC -//releases of 7.0.0. +Also see <>. -//Also see <>. +[[breaking-7.0.0]] +[float] +=== Breaking changes +Aggregations:: +* Remove support for deprecated params._agg/_aggs for scripted metric aggregations {pull}32979[#32979] (issues: {issue}29328[#29328], {issue}31597[#31597]) +* Percentile/Ranks should return null instead of NaN when empty {pull}30460[#30460] (issue: {issue}29066[#29066]) +* Render sum as zero if count is zero for stats aggregation {pull}27193[#27193] (issue: {issue}26893[#26893]) + +Analysis:: +* Remove `delimited_payload_filter` {pull}27705[#27705] (issues: {issue}26625[#26625], {issue}27704[#27704]) +* Limit the number of tokens produced by _analyze {pull}27529[#27529] (issue: {issue}27038[#27038]) +* Add limits for ngram and shingle settings {pull}27211[#27211] (issue: {issue}25887[#25887]) + +Audit:: +* Logfile auditing settings remove after deprecation {pull}35205[#35205] +* Remove index audit output type {pull}37707[#37707] (issues: {issue}29881[#29881], {issue}37301[#37301]) + +Authentication:: +* Security: remove wrapping in put user response {pull}33512[#33512] (issue: {issue}32332[#32332]) +* Remove bwc logic for token invalidation {pull}36893[#36893] (issue: {issue}36727[#36727]) + +Authorization:: +* Remove aliases resolution limitations when security is enabled {pull}31952[#31952] (issue: {issue}31516[#31516]) +* Remove implicit index monitor privilege {pull}37774[#37774] + +Circuit Breakers:: +* Lower fielddata circuit breaker's default limit {pull}27162[#27162] (issue: {issue}27130[#27130]) + +CRUD:: +* Version conflict exception message enhancement {pull}29432[#29432] (issue: {issue}21278[#21278]) +* Using ObjectParser in UpdateRequest {pull}29293[#29293] (issue: {issue}28740[#28740]) +* Remove support for internal versioning for concurrency control {pull}38254[#38254] (issue: {issue}1078[#1078]) + +Distributed:: +* Remove undocumented action.master.force_local setting {pull}29351[#29351] +* Remove tribe node support {pull}28443[#28443] +* Forbid negative values for index.unassigned.node_left.delayed_timeout {pull}26828[#26828] +* Remove cluster state size {pull}40061[#40061] (issues: {issue}39806[#39806], {issue}39827[#39827], {issue}39951[#39951], {issue}40016[#40016]) + +Features/Features:: +* Remove Migration Upgrade and Assistance APIs {pull}40075[#40075] (issue: {issue}40014[#40014]) + +Features/Indices APIs:: +* Indices Exists API should return 404 for empty wildcards {pull}34499[#34499] +* Default to one shard {pull}30539[#30539] +* Limit the number of nested documents {pull}27405[#27405] (issue: {issue}26962[#26962]) + +Features/Ingest:: +* Add Configuration Except. 
Data to Metdata {pull}32322[#32322] (issue: {issue}27728[#27728]) +* Add ECS schema for user-agent ingest processor (#37727) {pull}37984[#37984] (issues: {issue}37329[#37329], {issue}37727[#37727]) +* Remove special handling for ingest plugins {pull}36967[#36967] (issues: {issue}36898[#36898], {issue}36956[#36956]) + +Features/Java Low Level REST Client:: +* Drop support for the low-level REST client on JDK 7 {pull}38540[#38540] (issue: {issue}29607[#29607]) + +Features/Watcher:: +* Remove Watcher Account "unsecure" settings {pull}36736[#36736] (issue: {issue}36403[#36403]) + +Features/Stats:: +* Remove the suggest metric from stats APIs {pull}29635[#29635] (issue: {issue}29589[#29589]) +* Align cat thread pool info to thread pool config {pull}29195[#29195] (issue: {issue}29123[#29123]) +* Align thread pool info to thread pool configuration {pull}29123[#29123] (issue: {issue}29113[#29113]) + +Geo:: +* Use geohash cell instead of just a corner in geo_bounding_box {pull}30698[#30698] (issue: {issue}25154[#25154]) + +Index APIs:: +* Always enforce cluster-wide shard limit {pull}34892[#34892] (issues: {issue}20705[#20705], {issue}34021[#34021]) + +Infra/Circuit Breakers:: +* Introduce durability of circuit breaking exception {pull}34460[#34460] (issue: {issue}31986[#31986]) +* Circuit-break based on real memory usage {pull}31767[#31767] + +Infra/Core:: +* Default node.name to the hostname {pull}33677[#33677] +* Remove bulk fallback for write thread pool {pull}29609[#29609] +* CCS: Drop http address from remote cluster info {pull}29568[#29568] (issue: {issue}29207[#29207]) +* Remove the index thread pool {pull}29556[#29556] +* Main response should not have status 503 when okay {pull}29045[#29045] (issue: {issue}8902[#8902]) +* Automatically prepare indices for splitting {pull}27451[#27451] +* Don't refresh on `_flush` `_force_merge` and `_upgrade` {pull}27000[#27000] (issue: {issue}26972[#26972]) + +Infra/Logging:: +* Elasticsearch json logging {pull}36833[#36833] (issue: {issue}32850[#32850]) + +Infra/Packaging:: +* Packaging: Remove windows bin files from the tar distribution {pull}30596[#30596] +* Package ingest-user-agent as a module {pull}36956[#36956] +* Package ingest-geoip as a module {pull}36898[#36898] + +Infra/REST API:: +* Remove GET support for clear cache indices {pull}29525[#29525] +* Clear Indices Cache API remove deprecated url params {pull}29068[#29068] + +Infra/Scripting:: +* Remove support for deprecated StoredScript contexts {pull}31394[#31394] (issues: {issue}27612[#27612], {issue}28939[#28939]) +* Remove getDate methods from ScriptDocValues {pull}30690[#30690] +* Drop `ScriptDocValues#date` and `ScriptDocValues#dates` in 7.0.0 {pull}30690[#30690] (issue: {issue}23008[#23008]) + +Infra/Settings:: +* Remove config prompting for secrets and text {pull}27216[#27216] + +Machine Learning:: +* Remove types from datafeed {pull}36538[#36538] (issue: {issue}34265[#34265]) + +Mapping:: +* Match phrase queries against non-indexed fields should throw an exception {pull}31060[#31060] +* Remove legacy mapping code. {pull}29224[#29224] +* Reject updates to the `_default_` mapping. {pull}29165[#29165] (issues: {issue}15613[#15613], {issue}28248[#28248]) +* Remove the `update_all_types` option. {pull}28288[#28288] +* Remove the `_default_` mapping. {pull}28248[#28248] +* Reject the `index_options` parameter for numeric fields {pull}26668[#26668] (issue: {issue}21475[#21475]) +* Update the default for include_type_name to false. 
{pull}37285[#37285] +* Support 'include_type_name' in RestGetIndicesAction {pull}37149[#37149] + +Network:: +* Remove http.enabled setting {pull}29601[#29601] (issue: {issue}12792[#12792]) +* Remove HTTP max content length leniency {pull}29337[#29337] +* Remove TLS 1.0 as a default SSL protocol {pull}37512[#37512] (issue: {issue}36021[#36021]) +* Security: remove SSL settings fallback {pull}36846[#36846] (issue: {issue}29797[#29797]) + +Percolator:: +* Remove deprecated percolator map_unmapped_fields_as_string setting {pull}28060[#28060] + +Ranking:: +* Add minimal sanity checks to custom/scripted similarities. {pull}33564[#33564] (issue: {issue}33309[#33309]) +* Scroll queries asking for rescore are considered invalid {pull}32918[#32918] (issue: {issue}31775[#31775]) +* Forbid negative scores in function_score query {pull}35709[#35709] (issue: {issue}33309[#33309]) +* Forbid negative field boosts in analyzed queries {pull}37930[#37930] (issue: {issue}33309[#33309]) + +Scripting:: +* Delete deprecated getValues from ScriptDocValues {pull}36183[#36183] (issue: {issue}22919[#22919]) + +Search:: +* Remove deprecated url parameters `_source_include` and `_source_exclude` {pull}35097[#35097] (issues: {issue}22792[#22792], {issue}33475[#33475]) +* Disallow negative query boost {pull}34486[#34486] (issue: {issue}33309[#33309]) +* Forbid negative `weight` in Function Score Query {pull}33390[#33390] (issue: {issue}31927[#31927]) +* In the field capabilities API, remove support for providing fields in the request body. {pull}30185[#30185] +* Remove deprecated options for query_string {pull}29203[#29203] (issue: {issue}25551[#25551]) +* Fix Laplace scorer to multiply by alpha (and not add) {pull}27125[#27125] +* Remove _primary and _replica shard preferences {pull}26791[#26791] (issue: {issue}26335[#26335]) +* Limit the number of expanded fields it query_string and simple_query_string {pull}26541[#26541] (issue: {issue}25105[#25105]) +* Make purely negative queries return scores of 0. {pull}26015[#26015] (issue: {issue}23449[#23449]) +* Remove the deprecated _termvector endpoint. {pull}36131[#36131] (issues: {issue}36098[#36098], {issue}8484[#8484]) +* Remove deprecated Graph endpoints {pull}35956[#35956] +* Validate metadata on `_msearch` {pull}35938[#35938] (issue: {issue}35869[#35869]) +* Make hits.total an object in the search response {pull}35849[#35849] (issue: {issue}33028[#33028]) +* Remove the distinction between query and filter context in QueryBuilders {pull}35354[#35354] (issue: {issue}35293[#35293]) +* Throw a parsing exception when boost is set in span_or query (#28390) {pull}34112[#34112] (issue: {issue}28390[#28390]) +* Track total hits up to 10,000 by default {pull}37466[#37466] (issue: {issue}33028[#33028]) +* Use mappings to format doc-value fields by default. 
{pull}30831[#30831] (issues: {issue}26948[#26948], {issue}29639[#29639]) + +Security:: +* Remove heuristics that enable security on trial licenses {pull}38075[#38075] (issue: {issue}38009[#38009]) + +Snapshot/Restore:: +* Include size of snapshot in snapshot metadata {pull}30890[#30890] (issue: {issue}18543[#18543]) +* Remove azure deprecated settings {pull}26099[#26099] (issue: {issue}23405[#23405]) + +Store:: +* Drop elasticsearch-translog for 7.0 {pull}33373[#33373] (issues: {issue}31389[#31389], {issue}32281[#32281]) +* completely drop `index.shard.check_on_startup: fix` for 7.0 {pull}33194[#33194] + +Suggesters:: +* Fix threshold frequency computation in Suggesters {pull}34312[#34312] (issue: {issue}34282[#34282]) +* Make Geo Context Mapping Parsing More Strict {pull}32821[#32821] (issues: {issue}32202[#32202], {issue}32412[#32412]) +* Remove the ability to index or query context suggestions without context {pull}31007[#31007] (issue: {issue}30712[#30712]) + +ZenDiscovery:: +* Best-effort cluster formation if unconfigured {pull}36215[#36215] +* Remove DiscoveryPlugin#getDiscoveryTypes {pull}38414[#38414] (issue: {issue}38410[#38410]) + +[[breaking-java-7.0.0]] +[float] +=== Breaking Java changes + +Aggregations:: +* Change GeoHashGrid.Bucket#getKey() to return String {pull}31748[#31748] (issue: {issue}30320[#30320]) + +Analysis:: +* Remove deprecated AnalysisPlugin#requriesAnalysisSettings method {pull}32037[#32037] (issue: {issue}32025[#32025]) + +Features/Java High Level REST Client:: +* Drop deprecated methods from Retry {pull}33925[#33925] +* Cluster health to default to cluster level {pull}31268[#31268] (issue: {issue}29331[#29331]) +* Remove deprecated API methods {pull}31200[#31200] (issue: {issue}31069[#31069]) + +Features/Java Low Level REST Client:: +* Drop deprecated methods {pull}33223[#33223] (issues: {issue}29623[#29623], {issue}30315[#30315]) +* Remove support for maxRetryTimeout from low-level REST client {pull}38085[#38085] (issues: {issue}25951[#25951], {issue}31834[#31834], {issue}33342[#33342]) + +Geo:: +* Decouple geojson parse logic from ShapeBuilders {pull}27212[#27212] + +Infra/Core:: +* Remove RequestBuilder from Action {pull}30966[#30966] +* Handle scheduler exceptions {pull}38014[#38014] (issues: {issue}28667[#28667], {issue}36137[#36137], {issue}37708[#37708]) + +Infra/Transport API:: +* Java api clean up: remove deprecated `isShardsAcked` {pull}28311[#28311] (issues: {issue}27784[#27784], {issue}27819[#27819]) + +ZenDiscovery:: +* Make node field in JoinRequest private {pull}36405[#36405] + +[[deprecation-7.0.0]] +[float] +=== Deprecations + +Aggregations:: +* Deprecate dots in aggregation names {pull}31468[#31468] (issues: {issue}17600[#17600], {issue}19040[#19040]) + +Analysis:: +* Replace parameter unicodeSetFilter with unicode_set_filter {pull}29215[#29215] (issue: {issue}22823[#22823]) +* Replace delimited_payload_filter by delimited_payload {pull}26625[#26625] (issue: {issue}21978[#21978]) +* Deprecate Standard Html Strip Analyzer in master {pull}26719[#26719] (issue: {issue}4704[#4704]) +* Remove `nGram` and `edgeNGram` token filter names (#38911) {pull}39070[#39070] (issues: {issue}30209[#30209], {issue}38911[#38911]) + +Audit:: +* Deprecate index audit output type {pull}37301[#37301] (issue: {issue}29881[#29881]) + +Core:: +* Deprecate use of scientific notation in epoch time parsing {pull}36691[#36691] +* Add backcompat for joda time formats {pull}36531[#36531] + +Cluster Coordination:: +* Deprecate size in cluster state response 
{pull}39951[#39951] (issue: {issue}39806[#39806]) + +Features/Indices APIs:: +* Default copy settings to true and deprecate on the REST layer {pull}30598[#30598] +* Reject setting index.optimize_auto_generated_id after version 7.0.0 {pull}28895[#28895] (issue: {issue}27600[#27600]) + +Features/Ingest:: +* Deprecate `_type` in simulate pipeline requests {pull}37949[#37949] (issue: {issue}37731[#37731]) + +Features/Java High Level REST Client:: +* Deprecate HLRC security methods {pull}37883[#37883] (issues: {issue}36938[#36938], {issue}37540[#37540]) +* Deprecate HLRC EmptyResponse used by security {pull}37540[#37540] (issue: {issue}36938[#36938]) + +Features/Watcher:: +* Deprecate xpack.watcher.history.cleaner_service.enabled {pull}37782[#37782] (issue: {issue}32041[#32041]) +* deprecate types for watcher {pull}37594[#37594] (issue: {issue}35190[#35190]) + +Graph:: +* Deprecate types in `_graph/explore` calls. {pull}40466[#40466] + +Infra/Core:: +* Deprecate negative epoch timestamps {pull}36793[#36793] +* Deprecate use of scientific notation in epoch time parsing {pull}36691[#36691] + +Infra/Packaging:: +* Deprecate fallback to java on PATH {pull}37990[#37990] + +Infra/Scripting:: +* Add types deprecation to script contexts {pull}37554[#37554] +* Deprecate _type from LeafDocLookup {pull}37491[#37491] +* Remove deprecated params.ctx {pull}36848[#36848] (issue: {issue}34059[#34059]) + +Infra/Transport API:: +* Deprecate the transport client in favour of the high-level REST client {pull}27085[#27085] + +Machine Learning:: +* Deprecate X-Pack centric ML endpoints {pull}36315[#36315] (issue: {issue}35958[#35958]) +* Adding ml_settings entry to HLRC and Docs for deprecation_info {pull}38118[#38118] +* Datafeed deprecation checks {pull}38026[#38026] (issue: {issue}37932[#37932]) +* Remove "8" prefixes from file structure finder timestamp formats {pull}38016[#38016] +* Adjust structure finder for Joda to Java time migration {pull}37306[#37306] +* Resolve 7.0.0 TODOs in ML code {pull}36842[#36842] (issue: {issue}29963[#29963]) + +Mapping:: +* Deprecate type exists requests. {pull}34663[#34663] +* Deprecate types in index API {pull}36575[#36575] (issues: {issue}35190[#35190], {issue}35790[#35790]) +* Deprecate uses of _type as a field name in queries {pull}36503[#36503] (issue: {issue}35190[#35190]) +* Deprecate types in update_by_query and delete_by_query {pull}36365[#36365] (issue: {issue}35190[#35190]) +* For msearch templates, make sure to use the right name for deprecation logging. {pull}36344[#36344] +* Deprecate types in termvector and mtermvector requests. {pull}36182[#36182] +* Deprecate types in update requests. {pull}36181[#36181] +* Deprecate types in document delete requests. {pull}36087[#36087] +* Deprecate types in get, exists, and multi get. {pull}35930[#35930] +* Deprecate types in search and multi search templates. {pull}35669[#35669] +* Deprecate types in explain requests. {pull}35611[#35611] +* Deprecate types in validate query requests. {pull}35575[#35575] +* Deprecate types in count and msearch. {pull}35421[#35421] (issue: {issue}34041[#34041]) +* Deprecate types in rollover index API {pull}38039[#38039] (issue: {issue}35190[#35190]) +* Deprecate types in get field mapping API {pull}37667[#37667] (issue: {issue}35190[#35190]) +* Deprecate types in the put mapping API. {pull}37280[#37280] (issues: {issue}29453[#29453], {issue}37285[#37285]) +* Support include_type_name in the field mapping and index template APIs. {pull}37210[#37210] +* Deprecate types in create index requests. 
{pull}37134[#37134] (issues: {issue}29453[#29453], {issue}37285[#37285]) +* Deprecate use of the _type field in aggregations. {pull}37131[#37131] (issue: {issue}36802[#36802]) +* Deprecate reference to _type in lookup queries {pull}37016[#37016] (issue: {issue}35190[#35190]) +* Deprecate the document create endpoint. {pull}36863[#36863] +* Deprecate types in index API {pull}36575[#36575] (issues: {issue}35190[#35190], {issue}35790[#35790]) +* Deprecate types in update APIs {pull}36225[#36225] + +Migration:: +* Deprecate X-Pack centric Migration endpoints {pull}35976[#35976] (issue: {issue}35958[#35958]) + +Monitoring:: +* Deprecate /_xpack/monitoring/* in favor of /_monitoring/* {pull}36130[#36130] (issue: {issue}35958[#35958]) + +Rollup:: +* Re-deprecate xpack rollup endpoints {pull}36451[#36451] (issue: {issue}36044[#36044]) +* Deprecate X-Pack centric rollup endpoints {pull}35962[#35962] (issue: {issue}35958[#35958]) + +Scripting:: +* Adds deprecation logging to ScriptDocValues#getValues. {pull}34279[#34279] (issue: {issue}22919[#22919]) +* Conditionally use java time api in scripting {pull}31441[#31441] + +Search:: +* Deprecate filtering on `_type`. {pull}29468[#29468] (issue: {issue}15613[#15613]) +* Remove X-Pack centric graph endpoints {pull}36010[#36010] (issue: {issue}35958[#35958]) +* Deprecate use of type in reindex request body {pull}36823[#36823] +* Add typless endpoints for get_source and exist_source {pull}36426[#36426] + +Security:: +* Deprecate X-Pack centric license endpoints {pull}35959[#35959] (issue: {issue}35958[#35958]) +* Deprecate /_xpack/security/* in favor of /_security/* {pull}36293[#36293] (issue: {issue}35958[#35958]) + +SQL:: +* Deprecate X-Pack SQL translate endpoint {pull}36030[#36030] +* Deprecate X-Pack centric SQL endpoints {pull}35964[#35964] (issue: {issue}35958[#35958]) + +Watcher:: +* Deprecate X-Pack centric watcher endpoints {pull}36218[#36218] (issue: {issue}35958[#35958]) + + +[[feature-7.0.0]] +[float] +=== New features + +Allocation:: +* Node repurpose tool {pull}39403[#39403] (issues: {issue}37347[#37347], {issue}37748[#37748]) + +Analysis:: +* Relax TermVectors API to work with textual fields other than TextFieldType {pull}31915[#31915] (issue: {issue}31902[#31902]) +* Add support for inlined user dictionary in Nori {pull}36123[#36123] (issue: {issue}35842[#35842]) +* Add a prebuilt ICU Analyzer {pull}34958[#34958] (issue: {issue}34285[#34285]) + +Authentication:: +* Add support for API keys to access Elasticsearch {pull}38291[#38291] (issue: {issue}34383[#34383]) +* OIDC realm authentication flows {pull}37787[#37787] +* OIDC Realm JWT+JWS related functionality {pull}37272[#37272] (issues: {issue}35339[#35339], {issue}37009[#37009]) +* OpenID Connect Realm base functionality {pull}37009[#37009] (issue: {issue}35339[#35339]) + +Authorization:: +* Allow custom authorization with an authorization engine {pull}38358[#38358] (issues: {issue}32435[#32435], {issue}36245[#36245], {issue}37328[#37328], {issue}37495[#37495], {issue}37785[#37785], {issue}38137[#38137], {issue}38219[#38219]) +* Wildcard IndicesPermissions don't cover .security {pull}36765[#36765] + +CCR:: +* Generalize search.remote settings to cluster.remote {pull}33413[#33413] +* Add ccr follow info api {pull}37408[#37408] (issue: {issue}37127[#37127]) + +Distributed:: +* Log messages from allocation commands {pull}25955[#25955] (issues: {issue}22821[#22821], {issue}25325[#25325]) + +Features/ILM:: +* Add unfollow action {pull}36970[#36970] (issue: {issue}34648[#34648]) + 
+
+Authorization::
+* Allow custom authorization with an authorization engine {pull}38358[#38358] (issues: {issue}32435[#32435], {issue}36245[#36245], {issue}37328[#37328], {issue}37495[#37495], {issue}37785[#37785], {issue}38137[#38137], {issue}38219[#38219])
+* Wildcard IndicesPermissions don't cover .security {pull}36765[#36765]
+
+CCR::
+* Generalize search.remote settings to cluster.remote {pull}33413[#33413]
+* Add ccr follow info api {pull}37408[#37408] (issue: {issue}37127[#37127])
+
+Distributed::
+* Log messages from allocation commands {pull}25955[#25955] (issues: {issue}22821[#22821], {issue}25325[#25325])
+
+Features/ILM::
+* Add unfollow action {pull}36970[#36970] (issue: {issue}34648[#34648])
+
+Features/Ingest::
+* Revert "Introduce a Hashing Processor (#31087)" {pull}32178[#32178]
+* Add ingest-attachment support for per document `indexed_chars` limit {pull}28977[#28977] (issue: {issue}28942[#28942])
+
+Features/Java High Level REST Client::
+* GraphClient for the high level REST client and associated tests {pull}32366[#32366]
+
+Features/Monitoring::
+* Collect only display_name (for now) {pull}35265[#35265] (issue: {issue}8445[#8445])
+
+Geo::
+* Integrate Lucene's LatLonShape (BKD Backed GeoShapes) as default `geo_shape` indexing approach {pull}36751[#36751] (issue: {issue}35320[#35320])
+* Integrate Lucene's LatLonShape (BKD Backed GeoShapes) as default `geo_shape` indexing approach {pull}35320[#35320] (issue: {issue}32039[#32039])
+* geotile_grid implementation {pull}37842[#37842] (issue: {issue}30240[#30240])
+* Fork Lucene's LatLonShape Classes to local lucene package {pull}36794[#36794]
+
+Infra/Core::
+* Skip shard refreshes if shard is `search idle` {pull}27500[#27500]
+
+Infra/Logging::
+* Logging: Unify log rotation for index/search slow log {pull}27298[#27298]
+
+Infra/Plugins::
+* Reload secure settings for plugins {pull}31383[#31383] (issue: {issue}29135[#29135])
+
+Infra/REST API::
+* Add an `include_type_name` option. {pull}29453[#29453] (issue: {issue}15613[#15613])
+
+Java High Level REST Client::
+* Add rollup search {pull}36334[#36334] (issue: {issue}29827[#29827])
+
+Java Low Level REST Client::
+* Make warning behavior pluggable per request {pull}36345[#36345]
+* Add PreferHasAttributeNodeSelector {pull}36005[#36005]
+
+Machine Learning::
+* Filter undefined job groups from update job calendar actions {pull}30757[#30757]
+* Add delayed datacheck to the datafeed job runner {pull}35387[#35387] (issue: {issue}35131[#35131])
+* Adds set_upgrade_mode API endpoint {pull}37837[#37837]
+
+Mapping::
+* Add a `feature_vector` field. {pull}31102[#31102] (issue: {issue}27552[#27552])
+* Expose Lucene's FeatureField. {pull}30618[#30618]
+* Make typeless APIs usable with indices whose type name is different from `_doc` {pull}35790[#35790] (issue: {issue}35190[#35190])
+* Give precedence to index creation when mixing typed templates with typeless index creation and vice-versa. {pull}37871[#37871] (issue: {issue}37773[#37773])
+* Add nanosecond field mapper {pull}37755[#37755] (issues: {issue}27330[#27330], {issue}32601[#32601]) (see the sketch after this list)
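+
+As context for the nanosecond field mapper entry above: a minimal, hypothetical sketch that creates an index whose `ts` field uses the new `date_nanos` type, assuming a node on localhost:9200; index and field names are illustrative only.
+
+[source,java]
+----
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RestClient;
+
+public class DateNanosExample {
+    public static void main(String[] args) throws Exception {
+        try (RestClient client = RestClient.builder(
+                new HttpHost("localhost", 9200, "http")).build()) {
+            // Typeless create-index request with a date_nanos property.
+            Request request = new Request("PUT", "/my-index");
+            request.setJsonEntity(
+                "{\"mappings\":{\"properties\":{\"ts\":{\"type\":\"date_nanos\"}}}}");
+            System.out.println(client.performRequest(request).getStatusLine());
+        }
+    }
+}
+----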
+
+Ranking::
+* Add ranking evaluation API {pull}27478[#27478] (issue: {issue}19195[#19195])
+
+Recovery::
+* Allow to trim all ops above a certain seq# with a term lower than X, … {pull}31211[#31211] (issue: {issue}10708[#10708])
+
+SQL::
+* Add basic support for ST_AsWKT geo function {pull}34205[#34205]
+* Add support for SYS GEOMETRY_COLUMNS {pull}30496[#30496] (issue: {issue}29872[#29872])
+* Introduce HISTOGRAM grouping function {pull}36510[#36510] (issue: {issue}36509[#36509])
+* DATABASE() and USER() system functions {pull}35946[#35946] (issue: {issue}35863[#35863])
+* Introduce INTERVAL support {pull}35521[#35521] (issue: {issue}29990[#29990])
+* Allow sorting of groups by aggregates {pull}38042[#38042] (issue: {issue}35118[#35118])
+* Implement FIRST/LAST aggregate functions {pull}37936[#37936] (issue: {issue}35639[#35639])
+* Introduce SQL DATE data type {pull}37693[#37693] (issue: {issue}37340[#37340])
+
+Search::
+* Add “took” timing info to response for _msearch/template API {pull}30961[#30961] (issue: {issue}30957[#30957])
+* Add allow_partial_search_results flag to search requests with default setting true {pull}28440[#28440] (issue: {issue}27435[#27435])
+* Enable adaptive replica selection by default {pull}26522[#26522] (issue: {issue}24915[#24915])
+* Add intervals query {pull}36135[#36135] (issues: {issue}29636[#29636], {issue}32406[#32406]) (see the sketch after this list)
+* Added soft limit to open scroll contexts {pull}36009[#36009] (issue: {issue}25244[#25244])
+* Introduce ability to minimize round-trips in CCS {pull}37828[#37828] (issues: {issue}32125[#32125], {issue}37566[#37566])
+* Add script filter to intervals {pull}36776[#36776]
+* Add the ability to set the number of hits to track accurately {pull}36357[#36357] (issue: {issue}33028[#33028])
+* Add a maximum search request size. {pull}26423[#26423]
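+
+As context for the intervals query entry above: a minimal, hypothetical search sketch using the new query, assuming a node on localhost:9200 and an index `my-index` with a text field `body`; all names are illustrative only.
+
+[source,java]
+----
+import org.apache.http.HttpHost;
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+public class IntervalsQueryExample {
+    public static void main(String[] args) throws Exception {
+        try (RestClient client = RestClient.builder(
+                new HttpHost("localhost", 9200, "http")).build()) {
+            // Match "hot" and "water" in order, with at most two gaps between them.
+            Request request = new Request("GET", "/my-index/_search");
+            request.setJsonEntity("{\"query\":{\"intervals\":{\"body\":{"
+                + "\"match\":{\"query\":\"hot water\",\"max_gaps\":2,\"ordered\":true}}}}}");
+            Response response = client.performRequest(request);
+            System.out.println(EntityUtils.toString(response.getEntity()));
+        }
+    }
+}
+----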
+
+Security::
+* Switch internal security index to ".security-7" {pull}39337[#39337] (issue: {issue}39284[#39284])
+
+Suggesters::
+* Serialize suggestion responses as named writeables {pull}30284[#30284] (issue: {issue}26585[#26585])
+
+
+[[enhancement-7.0.0]]
+[float]
+=== Enhancements
+
+Aggregations::
+* Uses MergingDigest instead of AVLDigest in percentiles agg {pull}28702[#28702] (issue: {issue}19528[#19528])
+* Added keyed response to pipeline percentile aggregations {pull}36392[#36392] (issue: {issue}22302[#22302])
+* Enforce max_buckets limit only in the final reduction phase {pull}36152[#36152] (issues: {issue}32125[#32125], {issue}35921[#35921])
+* Histogram aggs: add empty buckets only in the final reduce step {pull}35921[#35921]
+* Handles exists query in composite aggs {pull}35758[#35758]
+* Added parent validation for auto date histogram {pull}35670[#35670]
+* Add Composite to AggregationBuilders {pull}38207[#38207] (issue: {issue}38020[#38020])
+* Allow nested fields in the composite aggregation {pull}37178[#37178] (issue: {issue}28611[#28611])
+* Remove single shard optimization when suggesting shard_size {pull}37041[#37041] (issue: {issue}32125[#32125])
+* Use List instead of priority queue for stable sorting in bucket sort aggregator {pull}36748[#36748] (issue: {issue}36322[#36322])
+* Keys are compared in BucketSortPipelineAggregation so making key type… {pull}36407[#36407]
+
+Allocation::
+* Fail start on obsolete indices documentation {pull}37786[#37786] (issue: {issue}27073[#27073])
+* Fail start on invalid index metadata {pull}37748[#37748] (issue: {issue}27073[#27073])
+* Fail start of non-data node if node has data {pull}37347[#37347] (issue: {issue}27073[#27073])
+
+Analysis::
+* Allow word_delimiter_graph_filter to not adjust internal offsets {pull}36699[#36699] (issues: {issue}33710[#33710], {issue}34741[#34741])
+* Ensure TokenFilters only produce single tokens when parsing synonyms {pull}34331[#34331] (issue: {issue}34298[#34298])
+
+Audit::
+* Add "request.id" to file audit logs {pull}35536[#35536]
+* Security Audit includes HTTP method for requests {pull}37322[#37322] (issue: {issue}29765[#29765])
+* Add X-Forwarded-For to the logfile audit {pull}36427[#36427]
+
+Authentication::
+* Invalidate Token API enhancements - HLRC {pull}36362[#36362] (issue: {issue}35388[#35388])
+* Add DEBUG/TRACE logs for LDAP bind {pull}36028[#36028]
+* Add Tests for findSamlRealm {pull}35905[#35905]
+* Add realm information for Authenticate API {pull}35648[#35648]
+* Formal support for "password_hash" in Put User {pull}35242[#35242] (issue: {issue}34729[#34729]) (see the sketch after this list)
+* Propagate auth result to listeners {pull}36900[#36900] (issue: {issue}30794[#30794])
+* Reorder realms based on last success {pull}36878[#36878]
+* Improve error message for 6.x style realm settings {pull}36876[#36876] (issues: {issue}30241[#30241], {issue}36026[#36026])
+* Change missing authn message to not mention tokens {pull}36750[#36750]
+* Enhance Invalidate Token API {pull}35388[#35388] (issues: {issue}34556[#34556], {issue}35115[#35115])
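+
+As context for the "password_hash" entry above: a minimal, hypothetical sketch that creates a user from a pre-computed hash instead of a plain-text password, assuming a node on localhost:9200; the user name, role, and truncated hash value are illustrative only.
+
+[source,java]
+----
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RestClient;
+
+public class PutUserWithHashExample {
+    public static void main(String[] args) throws Exception {
+        try (RestClient client = RestClient.builder(
+                new HttpHost("localhost", 9200, "http")).build()) {
+            // The hash must match the hashing algorithm the realm is configured with.
+            Request request = new Request("PUT", "/_security/user/jdoe");
+            request.setJsonEntity("{\"password_hash\":\"$2a$10$...\","
+                + "\"roles\":[\"monitoring_user\"]}");
+            System.out.println(client.performRequest(request).getStatusLine());
+        }
+    }
+}
+----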
+
+Authorization::
+* Improve exact index matching performance {pull}36017[#36017]
+* `manage_token` privilege for `kibana_system` {pull}35751[#35751]
+* Grant .tasks access to kibana_system role {pull}35573[#35573]
+* Add apm_user reserved role {pull}38206[#38206]
+* Permission for restricted indices {pull}37577[#37577] (issue: {issue}34454[#34454])
+* Remove kibana_user and kibana_dashboard_only_user index privileges {pull}37441[#37441]
+* Create snapshot role {pull}35820[#35820] (issue: {issue}34454[#34454])
+
+Build::
+* Sounds like typo in exception message {pull}35458[#35458]
+* Allow set section in setup section of REST tests {pull}34678[#34678]
+
+CCR::
+* Add time since last auto follow fetch to auto follow stats {pull}36542[#36542] (issues: {issue}33007[#33007], {issue}35895[#35895]) (see the sketch after this list)
+* Clean followed leader index UUIDs in auto follow metadata {pull}36408[#36408] (issue: {issue}33007[#33007])
+* Change AutofollowCoordinator to use wait_for_metadata_version {pull}36264[#36264] (issues: {issue}33007[#33007], {issue}35895[#35895])
+* Refactor AutoFollowCoordinator to track leader indices per remote cluster {pull}36031[#36031] (issues: {issue}33007[#33007], {issue}35895[#35895])
+* Concurrent file chunk fetching for CCR restore {pull}38495[#38495]
+* Tighten mapping syncing in ccr remote restore {pull}38071[#38071] (issues: {issue}36879[#36879], {issue}37887[#37887])
+* Do not allow put mapping on follower {pull}37675[#37675] (issue: {issue}30086[#30086])
+* Added ccr to xpack usage infrastructure {pull}37256[#37256] (issue: {issue}37221[#37221])
+* FollowingEngine should fail with 403 if operation has no seqno assigned {pull}37213[#37213]
+* Added auto_follow_exception.timestamp field to auto follow stats {pull}36947[#36947]
+* Reduce retention lease sync intervals {pull}40302[#40302]
+* Renew retention leases while following {pull}39335[#39335] (issues: {issue}37165[#37165], {issue}38718[#38718])
+* Reduce refresh when lookup term in FollowingEngine {pull}39184[#39184]
+* Integrate retention leases to recovery from remote {pull}38829[#38829] (issue: {issue}37165[#37165])
+* Enable removal of retention leases {pull}38751[#38751] (issue: {issue}37165[#37165])
+* Introduce forget follower API {pull}39718[#39718] (issue: {issue}37165[#37165])
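+
+As context for the auto follow stats entry above: a minimal, hypothetical sketch that reads the CCR stats, which include the auto-follow coordinator stats, through the low-level REST client, assuming a node on localhost:9200.
+
+[source,java]
+----
+import org.apache.http.HttpHost;
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+public class CcrStatsExample {
+    public static void main(String[] args) throws Exception {
+        try (RestClient client = RestClient.builder(
+                new HttpHost("localhost", 9200, "http")).build()) {
+            // The response carries auto_follow_stats, including the time since
+            // the last auto follow fetch.
+            Response response = client.performRequest(new Request("GET", "/_ccr/stats"));
+            System.out.println(EntityUtils.toString(response.getEntity()));
+        }
+    }
+}
+----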
+
+Client::
+* Fixed required fields and paths list {pull}39358[#39358]
+
+Cluster Coordination::
+* Remove timeout task after completing cluster state publication {pull}40411[#40411]
+* Use default discovery implementation for single-node discovery {pull}40036[#40036]
+* Do not log unsuccessful join attempt each time {pull}39756[#39756]
+
+Core::
+* Override the JVM DNS cache policy {pull}36570[#36570]
+* Replace usages of AtomicBoolean based block of code by the RunOnce class {pull}35553[#35553] (issue: {issue}35489[#35489])
+* Added wait_for_metadata_version parameter to cluster state API. {pull}35535[#35535]
+* Extract RunOnce into a dedicated class {pull}35489[#35489]
+* Introduce elasticsearch-core jar {pull}28191[#28191] (issue: {issue}27933[#27933])
+* Rename core module to server {pull}28180[#28180] (issue: {issue}27933[#27933])
+
+CRUD::
+* Rename seq# powered optimistic concurrency control parameters to ifSeqNo/ifPrimaryTerm {pull}36757[#36757] (issues: {issue}10708[#10708], {issue}36148[#36148])
+* Expose Sequence Number based Optimistic Concurrency Control in the rest layer {pull}36721[#36721] (issues: {issue}10708[#10708], {issue}36148[#36148]) (see the sketch after this list)
+* Add doc's sequence number + primary term to GetResult and use it for updates {pull}36680[#36680] (issues: {issue}10708[#10708], {issue}36148[#36148])
+* Add seq no powered optimistic locking support to the index and delete transport actions {pull}36619[#36619] (issues: {issue}10708[#10708], {issue}36148[#36148])
+* Add Seq# based optimistic concurrency control to UpdateRequest {pull}37872[#37872] (issues: {issue}10708[#10708], {issue}36148[#36148])
+* Introduce ssl settings to reindex from remote {pull}37527[#37527] (issues: {issue}29755[#29755], {issue}37287[#37287])
+* Use Sequence number powered OCC for processing updates {pull}37308[#37308] (issues: {issue}10708[#10708], {issue}36148[#36148])
+* Document Seq No powered optimistic concurrency control {pull}37284[#37284] (issues: {issue}10708[#10708], {issue}36148[#36148])
+* Enable IPv6 URIs in reindex from remote {pull}36874[#36874]
+* Set acking timeout to 0 on dynamic mapping update {pull}31140[#31140] (issues: {issue}30672[#30672], {issue}30844[#30844])
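+
+As context for the optimistic concurrency control entries above: a minimal, hypothetical sketch of a conditional index operation that only succeeds if the document still has the given sequence number and primary term, assuming a node on localhost:9200; index, id, and values are illustrative only.
+
+[source,java]
+----
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RestClient;
+
+public class SeqNoConditionalWriteExample {
+    public static void main(String[] args) throws Exception {
+        try (RestClient client = RestClient.builder(
+                new HttpHost("localhost", 9200, "http")).build()) {
+            // Fails with a version conflict if the document changed in between.
+            Request request = new Request("PUT", "/my-index/_doc/1");
+            request.addParameter("if_seq_no", "42");
+            request.addParameter("if_primary_term", "1");
+            request.setJsonEntity("{\"field\":\"updated value\"}");
+            System.out.println(client.performRequest(request).getStatusLine());
+        }
+    }
+}
+----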
+
+Discovery-Plugins::
+* Adds connect and read timeouts to discovery-gce {pull}28193[#28193] (issue: {issue}24313[#24313])
+
+Distributed::
+* [Close Index API] Mark shard copy as stale if needed during shard verification {pull}36755[#36755]
+* [Close Index API] Refactor MetaDataIndexStateService {pull}36354[#36354] (issue: {issue}36249[#36249])
+* [Close Index API] Add TransportShardCloseAction for pre-closing verifications {pull}36249[#36249]
+* TransportResyncReplicationAction should not honour blocks {pull}35795[#35795] (issues: {issue}35332[#35332], {issue}35597[#35597])
+* Expose all permits acquisition in IndexShard and TransportReplicationAction {pull}35540[#35540] (issue: {issue}33888[#33888])
+* [RCI] Check blocks while having index shard permit in TransportReplicationAction {pull}35332[#35332] (issue: {issue}33888[#33888])
+* Recover retention leases during peer recovery {pull}38435[#38435] (issue: {issue}37165[#37165])
+* Lift retention lease expiration to index shard {pull}38380[#38380] (issues: {issue}37165[#37165], {issue}37963[#37963], {issue}38070[#38070])
+* Introduce retention lease background sync {pull}38262[#38262] (issue: {issue}37165[#37165])
+* Allow shards of closed indices to be replicated as regular shards {pull}38024[#38024] (issue: {issue}33888[#33888])
+* Expose retention leases in shard stats {pull}37991[#37991] (issue: {issue}37165[#37165])
+* Introduce retention leases versioning {pull}37951[#37951] (issue: {issue}37165[#37165])
+* Soft-deletes policy should always fetch latest leases {pull}37940[#37940] (issues: {issue}37165[#37165], {issue}37375[#37375])
+* Sync retention leases on expiration {pull}37902[#37902] (issue: {issue}37165[#37165])
+* Ignore shard started requests when primary term does not match {pull}37899[#37899] (issue: {issue}33888[#33888])
+* Move update and delete by query to use seq# for optimistic concurrency control {pull}37857[#37857] (issues: {issue}10708[#10708], {issue}36148[#36148], {issue}37639[#37639])
+* Introduce retention lease serialization {pull}37447[#37447] (issues: {issue}37165[#37165], {issue}37398[#37398])
+* Add run under primary permit method {pull}37440[#37440] (issue: {issue}37398[#37398])
+* Introduce retention lease syncing {pull}37398[#37398] (issue: {issue}37165[#37165])
+* Introduce retention lease persistence {pull}37375[#37375] (issue: {issue}37165[#37165])
+* Add validation for retention lease construction {pull}37312[#37312] (issue: {issue}37165[#37165])
+* Introduce retention lease expiration {pull}37195[#37195] (issue: {issue}37165[#37165])
+* Introduce shard history retention leases {pull}37167[#37167] (issue: {issue}37165[#37165])
+* [Close Index API] Add unique UUID to ClusterBlock {pull}36775[#36775]
+* [Close Index API] Propagate tasks ids between Freeze, Close and Verify Shard actions {pull}36630[#36630]
+* Always initialize the global checkpoint {pull}34381[#34381]
+* Introduce retention lease actions {pull}38756[#38756] (issue: {issue}37165[#37165])
+* Add dedicated retention lease exceptions {pull}38754[#38754] (issue: {issue}37165[#37165])
+* Copy retention leases when trim unsafe commits {pull}37995[#37995] (issue: {issue}37165[#37165])
+* Allow retention lease operations under blocks {pull}39089[#39089] (issues: {issue}34648[#34648], {issue}37165[#37165])
+* Remove retention leases when unfollowing {pull}39088[#39088] (issues: {issue}34648[#34648], {issue}37165[#37165])
+* Introduce retention lease state file {pull}39004[#39004] (issues: {issue}37165[#37165], {issue}38588[#38588], {issue}39032[#39032])
+* Enable soft-deletes by default for 7.0+ indices {pull}38929[#38929] (issue: {issue}36141[#36141])
+
+Engine::
+* Remove versionType from translog {pull}31945[#31945]
+* Do retry if primary fails on AsyncAfterWriteAction {pull}31857[#31857] (issues: {issue}31716[#31716], {issue}31755[#31755])
+* Handle AsyncAfterWriteAction exception before listener is registered {pull}31755[#31755] (issue: {issue}31716[#31716])
+* Use IndexWriter#flushNextBuffer to free memory {pull}27753[#27753]
+* Remove pre 6.0.0 support from InternalEngine {pull}27720[#27720]
+* Add sequence numbers based optimistic concurrency control support to Engine {pull}36467[#36467] (issues: {issue}10708[#10708], {issue}36148[#36148])
+* Require soft-deletes when access changes snapshot {pull}36446[#36446]
+* Use delCount of SegmentInfos to calculate numDocs {pull}36323[#36323]
+* Always configure soft-deletes field of IndexWriterConfig {pull}36196[#36196] (issue: {issue}36141[#36141])
+* Enable soft-deletes by default on 7.0.0 or later {pull}36141[#36141]
+* Always return false from `refreshNeeded` on ReadOnlyEngine {pull}35837[#35837] (issue: {issue}35785[#35785])
+* Add a `_freeze` / `_unfreeze` API 
{pull}35592[#35592] (issue: {issue}34352[#34352]) +* [RCI] Add IndexShardOperationPermits.asyncBlockOperations(ActionListener) {pull}34902[#34902] (issue: {issue}33888[#33888]) +* Specialize pre-closing checks for engine implementations {pull}38702[#38702] +* Ensure that max seq # is equal to the global checkpoint when creating ReadOnlyEngines {pull}37426[#37426] +* Enable Bulk-Merge if all source remains {pull}37269[#37269] +* Rename setting to enable mmap {pull}37070[#37070] (issue: {issue}36668[#36668]) +* Add hybridfs store type {pull}36668[#36668] +* Introduce time-based retention policy for soft-deletes {pull}34943[#34943] (issue: {issue}34908[#34908]) +* Handle AsyncAfterWriteAction failure on primary in the same way as failures on replicas {pull}31969[#31969] (issues: {issue}31716[#31716], {issue}31755[#31755]) +* Explicitly advance max_seq_no before indexing {pull}39473[#39473] (issue: {issue}38879[#38879]) +* Also mmap cfs files for hybridfs {pull}38940[#38940] (issue: {issue}36668[#36668]) + +Features/CAT APIs:: +* Expose `search.throttled` on `_cat/indices` {pull}37073[#37073] (issue: {issue}34352[#34352]) + +Features/Features:: +* Run Node deprecation checks locally (#38065) {pull}38250[#38250] (issue: {issue}38065[#38065]) + +Features/ILM:: +* Ensure ILM policies run safely on leader indices {pull}38140[#38140] (issue: {issue}34648[#34648]) +* Skip Shrink when numberOfShards not changed {pull}37953[#37953] (issue: {issue}33275[#33275]) +* Inject Unfollow before Rollover and Shrink {pull}37625[#37625] (issue: {issue}34648[#34648]) +* Add set_priority action to ILM {pull}37397[#37397] (issue: {issue}36905[#36905]) +* Add Freeze Action {pull}36910[#36910] (issue: {issue}34630[#34630]) + +Features/Indices APIs:: +* Add cluster-wide shard limit {pull}32856[#32856] (issue: {issue}20705[#20705]) +* Remove RestGetAllAliasesAction {pull}31308[#31308] (issue: {issue}31129[#31129]) +* Add rollover-creation-date setting to rolled over index {pull}31144[#31144] (issue: {issue}30887[#30887]) +* add is-write-index flag to aliases {pull}30942[#30942] +* Make index and bulk APIs work without types. {pull}29479[#29479] +* Simplify deprecation issue levels {pull}36326[#36326] +* New mapping signature and mapping string source fixed. 
{pull}37401[#37401]
+
+Features/Ingest::
+* Add ignore_missing property to foreach filter (#22147) {pull}31578[#31578] (issue: {issue}22147[#22147])
+* Compile mustache template only if field includes '{{' {pull}37207[#37207] (issue: {issue}37120[#37120])
+* Move ingest-geoip default databases out of config {pull}36949[#36949] (issue: {issue}36898[#36898])
+* Make the ingest-geoip databases even lazier to load {pull}36679[#36679]
+* Updates the grok patterns to be consistent with Logstash {pull}27181[#27181]
+
+Features/Java High Level REST Client::
+* HLRC API for _termvectors {pull}32610[#32610] (issue: {issue}27205[#27205])
+* Fix strict setting exception handling {pull}37247[#37247] (issue: {issue}37090[#37090])
+* Use nonblocking entity for requests {pull}32249[#32249]
+
+Features/Monitoring::
+* Make Exporters Async {pull}35765[#35765] (issue: {issue}35743[#35743])
+* Adding mapping for hostname field {pull}37288[#37288]
+* Remove types from internal monitoring templates and bump to api 7 {pull}39888[#39888] (issue: {issue}38637[#38637])
+
+Features/Stats::
+* Stats to record how often the ClusterState diff mechanism is used successfully {pull}26973[#26973]
+* Add JVM dns cache expiration config to JvmInfo {pull}36372[#36372]
+
+Features/Watcher::
+* Validate email addresses when storing a watch {pull}34042[#34042] (issue: {issue}33980[#33980])
+* Move watcher to use seq# and primary term for concurrency control {pull}37977[#37977] (issues: {issue}10708[#10708], {issue}37872[#37872])
+* Use ILM for Watcher history deletion {pull}37443[#37443] (issue: {issue}32041[#32041])
+* Add whitelist to HttpClient {pull}36817[#36817] (issue: {issue}29937[#29937])
+* Remove the index type from internal watcher indexes {pull}39761[#39761] (issue: {issue}38637[#38637])
+
+Geo::
+* Adds a name of the field to geopoint parsing errors {pull}36529[#36529] (issue: {issue}15965[#15965])
+* Add support to ShapeBuilders for building Lucene geometry {pull}35707[#35707] (issue: {issue}35320[#35320])
+* Add ST_WktToSQL function {pull}35416[#35416] (issue: {issue}29872[#29872])
+
+Index APIs::
+* Add cluster-wide shard limit warnings {pull}34021[#34021] (issues: {issue}20705[#20705], {issue}32856[#32856])
+
+Infra/Circuit Breakers::
+* Have circuit breaker succeed on unknown mem usage {pull}33125[#33125] (issue: {issue}31767[#31767])
+* Account for XContent overhead in in-flight breaker {pull}31613[#31613]
+* Script Stats: Add compilation limit counter to stats {pull}26387[#26387]
+
+Infra/Core::
+* Add RunOnce utility class that executes a Runnable exactly once {pull}35484[#35484] (see the sketch after this list)
+* Improved IndexNotFoundException's default error message {pull}34649[#34649] (issue: {issue}34628[#34628])
+* Fix a few versionAdded values in ElasticsearchExceptions {pull}37877[#37877]
+* Add simple method to write collection of writeables {pull}37448[#37448] (issue: {issue}37398[#37398])
+* Date/Time parsing: Use java time API instead of exception handling {pull}37222[#37222]
+* [API] spelling: interruptible {pull}37049[#37049] (issue: {issue}37035[#37035])
+* Enhancements to IndicesQueryCache. {pull}39099[#39099] (issue: {issue}37117[#37117])
+* Change zone formatting for all printers {pull}39568[#39568] (issue: {issue}38471[#38471])
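+
+As context for the RunOnce entry above: a minimal sketch of the idea behind such a utility, written here against plain JDK types rather than the Elasticsearch class itself; the class name mirrors the entry but the implementation shown is illustrative only.
+
+[source,java]
+----
+import java.util.concurrent.atomic.AtomicBoolean;
+
+// Wraps a Runnable so that only the first invocation runs it; subsequent
+// calls become no-ops, even under concurrent access.
+public final class RunOnceSketch implements Runnable {
+    private final Runnable delegate;
+    private final AtomicBoolean hasRun = new AtomicBoolean();
+
+    public RunOnceSketch(Runnable delegate) {
+        this.delegate = delegate;
+    }
+
+    @Override
+    public void run() {
+        if (hasRun.compareAndSet(false, true)) {
+            delegate.run();
+        }
+    }
+}
+----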
+
+Infra/Logging::
+* Trim the JSON source in indexing slow logs {pull}38081[#38081] (issue: {issue}38080[#38080])
+* Optimize warning header de-duplication {pull}37725[#37725] (issues: {issue}35754[#35754], {issue}37530[#37530], {issue}37597[#37597], {issue}37622[#37622])
+* Remove warn-date from warning headers {pull}37622[#37622] (issues: {issue}35754[#35754], {issue}37530[#37530], {issue}37597[#37597])
+* Add some deprecation optimizations {pull}37597[#37597] (issues: {issue}35754[#35754], {issue}37530[#37530])
+* Only update response headers if we have a new one {pull}37590[#37590] (issues: {issue}35754[#35754], {issue}37530[#37530])
+
+Infra/Packaging::
+* Choose JVM options ergonomically {pull}30684[#30684]
+* Add OS/architecture classifier to distributions {pull}37881[#37881]
+* Change file descriptor limit to 65535 {pull}37537[#37537] (issue: {issue}35839[#35839])
+* Exit batch files explicitly using ERRORLEVEL {pull}29583[#29583] (issue: {issue}29582[#29582])
+* Add no-jdk distributions {pull}39882[#39882]
+* Allow AVX-512 on JDK 11+ {pull}40828[#40828] (issue: {issue}32138[#32138])
+* Use bundled JDK in Docker images {pull}40238[#40238]
+* Upgrade bundled JDK and Docker images to JDK 12 {pull}40229[#40229]
+* Bundle java in distributions {pull}38013[#38013] (issue: {issue}31845[#31845])
+
+Infra/REST API::
+* Remove hand-coded XContent duplicate checks {pull}34588[#34588] (issues: {issue}22073[#22073], {issue}22225[#22225], {issue}22253[#22253])
+* Add the `include_type_name` option to the search and document APIs. {pull}29506[#29506] (issue: {issue}15613[#15613])
+* Validate `op_type` for `_create` {pull}27483[#27483]
+
+Infra/Scripting::
+* Tests: Add support for custom contexts to mock scripts {pull}34100[#34100]
+* Reflect factory signatures in painless classloader {pull}34088[#34088]
+* Handle missing values in painless {pull}32207[#32207] (issue: {issue}29286[#29286])
+* Add getZone to JodaCompatibleZonedDateTime {pull}37084[#37084]
+* [Painless] Add boxed type to boxed type casts for method/return {pull}36571[#36571]
+
+Infra/Settings::
+* Settings: Add keystore creation to add commands {pull}26126[#26126]
+* Separate out validation of groups of settings {pull}34184[#34184]
+* Provide a clearer error message on keystore add {pull}39327[#39327] (issue: {issue}39324[#39324])
+
+Infra/Transport API::
+* Change BWC version for VerifyRepositoryResponse {pull}30796[#30796] (issue: {issue}30762[#30762])
+
+Ingest::
+* Grok fix duplicate patterns JAVACLASS and JAVAFILE {pull}35886[#35886]
+* Implement Drop Processor {pull}32278[#32278] (issue: {issue}23726[#23726])
+
+Java High Level REST Client::
+* Add get users action {pull}36332[#36332] (issue: {issue}29827[#29827])
+* Add delete template API {pull}36320[#36320] (issue: {issue}27205[#27205])
+* Implement get-user-privileges API {pull}36292[#36292]
+* Get Deprecation Info API {pull}36279[#36279] (issue: {issue}29827[#29827])
+* Add support for Follow Stats API {pull}36253[#36253] (issue: {issue}33824[#33824])
+* Add support for CCR Stats API {pull}36213[#36213] (issue: {issue}33824[#33824])
+* Put Role {pull}36209[#36209] (issue: {issue}29827[#29827])
+* Add index templates exist API {pull}36132[#36132] (issue: {issue}27205[#27205])
+* Add support for CCR Get Auto Follow Pattern apis {pull}36049[#36049] (issue: {issue}33824[#33824])
+* Add support for CCR Delete Auto 
Follow Pattern API {pull}35981[#35981] (issue: {issue}33824[#33824]) +* Remove fromXContent from IndexUpgradeInfoResponse {pull}35934[#35934] +* Add delete expired data API {pull}35906[#35906] (issue: {issue}29827[#29827]) +* Execute watch API {pull}35868[#35868] (issue: {issue}29827[#29827]) +* Add ability to put user with a password hash {pull}35844[#35844] (issue: {issue}35242[#35242]) +* Add ML find file structure API {pull}35833[#35833] (issue: {issue}29827[#29827]) +* Add support for get roles API {pull}35787[#35787] (issue: {issue}29827[#29827]) +* Added support for CCR Put Auto Follow Pattern API {pull}35780[#35780] (issue: {issue}33824[#33824]) +* XPack ML info action {pull}35777[#35777] (issue: {issue}29827[#29827]) +* ML Delete event from Calendar {pull}35760[#35760] (issue: {issue}29827[#29827]) +* Add ML revert model snapshot API {pull}35750[#35750] (issue: {issue}29827[#29827]) +* ML Get Calendar Events {pull}35747[#35747] (issue: {issue}29827[#29827]) +* Add high-level REST client API for `_freeze` and `_unfreeze` {pull}35723[#35723] (issue: {issue}34352[#34352]) +* Fix issue in equals impl for GlobalOperationPrivileges {pull}35721[#35721] +* ML Delete job from calendar {pull}35713[#35713] (issue: {issue}29827[#29827]) +* ML Add Event To Calendar API {pull}35704[#35704] (issue: {issue}29827[#29827]) +* Add ML update model snapshot API (#35537) {pull}35694[#35694] (issue: {issue}29827[#29827]) +* Add support for CCR Unfollow API {pull}35693[#35693] (issue: {issue}33824[#33824]) +* Clean up PutLicenseResponse {pull}35689[#35689] (issue: {issue}35547[#35547]) +* Clean up StartBasicResponse {pull}35688[#35688] (issue: {issue}35547[#35547]) +* Add support for put privileges API {pull}35679[#35679] +* ML Add Job to Calendar API {pull}35666[#35666] (issue: {issue}29827[#29827]) +* Add support for CCR Resume Follow API {pull}35638[#35638] (issue: {issue}33824[#33824]) +* Add support for get application privileges API {pull}35556[#35556] (issue: {issue}29827[#29827]) +* Clean up XPackInfoResponse class and related tests {pull}35547[#35547] +* Add parameters to stopRollupJob API {pull}35545[#35545] (issue: {issue}34811[#34811]) +* Add ML delete model snapshot API {pull}35537[#35537] (issue: {issue}29827[#29827]) +* Add get watch API {pull}35531[#35531] (issue: {issue}29827[#29827]) +* Add ML Update Filter API {pull}35522[#35522] (issue: {issue}29827[#29827]) +* Add ml get filters api {pull}35502[#35502] (issue: {issue}29827[#29827]) +* Add ML get model snapshots API {pull}35487[#35487] (issue: {issue}29827[#29827]) +* Add "_has_privileges" API to Security Client {pull}35479[#35479] (issue: {issue}29827[#29827]) +* Add Delete Privileges API to HLRC {pull}35454[#35454] (issue: {issue}29827[#29827]) +* Add support for CCR Put Follow API {pull}35409[#35409] +* Add ML delete filter action {pull}35382[#35382] (issue: {issue}29827[#29827]) +* Add delete user action {pull}35294[#35294] (issue: {issue}29827[#29827]) +* HLRC for _mtermvectors {pull}35266[#35266] (issues: {issue}27205[#27205], {issue}33447[#33447]) +* Reindex API with wait_for_completion false {pull}35202[#35202] (issue: {issue}27205[#27205]) +* Add watcher stats API {pull}35185[#35185] (issue: {issue}29827[#29827]) +* HLRC support for getTask {pull}35166[#35166] (issue: {issue}27205[#27205]) +* Add GetRollupIndexCaps API {pull}35102[#35102] (issue: {issue}29827[#29827]) +* HLRC: migration api - upgrade {pull}34898[#34898] (issue: {issue}29827[#29827]) +* Add stop rollup job support to HL REST Client {pull}34702[#34702] (issue: 
{issue}29827[#29827])
+* Bulk Api support for global parameters {pull}34528[#34528] (issue: {issue}26026[#26026])
+* Add delete rollup job support to HL REST Client {pull}34066[#34066] (issue: {issue}29827[#29827])
+* Add support for get license basic/trial status API {pull}33176[#33176] (issue: {issue}29827[#29827])
+* Add machine learning open job {pull}32860[#32860] (issue: {issue}29827[#29827])
+* Add ML HLRC wrapper and put_job API call {pull}32726[#32726]
+* Add Get Snapshots High Level REST API {pull}31537[#31537] (issue: {issue}27205[#27205])
+
+Java Low Level REST Client::
+* On retry timeout add root exception {pull}25576[#25576]
+
+License::
+* Require acknowledgement to start_trial license {pull}30135[#30135] (issue: {issue}30134[#30134])
+* Handle malformed license signatures {pull}37137[#37137] (issue: {issue}35340[#35340])
+
+Machine Learning::
+* Create the ML annotations index {pull}36731[#36731] (issues: {issue}26034[#26034], {issue}33376[#33376])
+* Split in batches and migrate all jobs and datafeeds {pull}36716[#36716] (issue: {issue}32905[#32905])
+* Add cluster setting to enable/disable config migration {pull}36700[#36700] (issue: {issue}32905[#32905])
+* Add audits when deprecation warnings occur with datafeed start {pull}36233[#36233]
+* Add lazy parsing for DatafeedConfig:Aggs,Query {pull}36117[#36117]
+* Add support for lazy nodes (#29991) {pull}34538[#34538] (issue: {issue}29991[#29991])
+* Move ML Optimistic Concurrency Control to Seq No {pull}38278[#38278] (issues: {issue}10708[#10708], {issue}36148[#36148])
+* Add explanation so far to file structure finder exceptions {pull}38191[#38191] (issue: {issue}29821[#29821])
+* Add reason field in JobTaskState {pull}38029[#38029] (issue: {issue}34431[#34431])
+* Add _meta information to all ML indices {pull}37964[#37964]
+* Add upgrade mode docs, hlrc, and fix bug {pull}37942[#37942]
+* Tighten up use of aliases rather than concrete indices {pull}37874[#37874]
+* Add support for single bucket aggs in Datafeeds {pull}37544[#37544] (issue: {issue}36838[#36838])
+* Merge the Jindex master feature branch {pull}36702[#36702] (issue: {issue}32905[#32905])
+* Allow stop unassigned datafeed and relax unset upgrade mode wait {pull}39034[#39034]
+
+Mapping::
+* Log document id when MapperParsingException occurs {pull}37800[#37800] (issue: {issue}37658[#37658])
+* [API] spelling: unknown {pull}37056[#37056] (issue: {issue}37035[#37035])
+* Make SourceToParse immutable {pull}36971[#36971]
+* Use index-prefix fields for terms of length min_chars - 1 {pull}36703[#36703]
+* Introduce a parameter suppress_types_warnings. {pull}38923[#38923]
+
+Network::
+* Add cors support to NioHttpServerTransport {pull}30827[#30827] (issue: {issue}28898[#28898])
+* Reintroduce mandatory http pipelining support {pull}30820[#30820]
+* Make http pipelining support mandatory {pull}30695[#30695] (issues: {issue}28898[#28898], {issue}29500[#29500])
+* Add nio http server transport {pull}29587[#29587] (issue: {issue}28898[#28898])
+* Add class for serializing message to bytes {pull}29384[#29384] (issue: {issue}28898[#28898])
+* Selectors operate on channel contexts {pull}28468[#28468] (issue: {issue}27260[#27260])
+* Unify nio read / write channel contexts {pull}28160[#28160] (issue: {issue}27260[#27260])
+* Create nio-transport plugin for NioTransport {pull}27949[#27949] (issue: {issue}27260[#27260])
+* Add elasticsearch-nio jar for base nio classes {pull}27801[#27801] (issue: {issue}27802[#27802])
+* Unify transport settings naming {pull}36623[#36623]
+* Add sni name to SSLEngine in netty transport {pull}33144[#33144] (issue: {issue}32517[#32517])
+* Add NioGroup for use in different transports {pull}27737[#27737] (issue: {issue}27260[#27260])
+* Add read timeouts to http module {pull}27713[#27713]
+* Implement byte array reusage in `NioTransport` {pull}27696[#27696] (issue: {issue}27563[#27563])
+* Introduce resizable inbound byte buffer {pull}27551[#27551] (issue: {issue}27563[#27563])
+* Decouple nio constructs from the tcp transport {pull}27484[#27484] (issue: {issue}27260[#27260])
+* Remove manual tracking of registered channels {pull}27445[#27445] (issue: {issue}27260[#27260])
+* Remove tcp profile from low level nio channel {pull}27441[#27441] (issue: {issue}27260[#27260])
+* Decouple `ChannelFactory` from Tcp classes {pull}27286[#27286] (issue: {issue}27260[#27260])
+* Enable TLSv1.3 by default for JDKs with support {pull}38103[#38103] (issue: {issue}32276[#32276])
+
+Packaging::
+* Introduce Docker images build {pull}36246[#36246]
+* Move creation of temporary directory to Java {pull}36002[#36002] (issue: {issue}31003[#31003])
+
+Percolator::
+* Make the `type` parameter optional when percolating existing documents. {pull}39987[#39987] (issue: {issue}39963[#39963])
+* Add support for selecting percolator query candidate matches containing geo_point based queries {pull}26040[#26040]
+
+Plugins::
+* Plugin install: don't print download progress in batch mode {pull}36361[#36361]
+
+Ranking::
+* Add k parameter to PrecisionAtK metric {pull}27569[#27569]
+* Vector field {pull}33022[#33022] (issue: {issue}31615[#31615])
+
+Recovery::
+* SyncedFlushService.getShardRoutingTable() should use metadata to check for index existence {pull}37691[#37691] (issue: {issue}33888[#33888])
+* Make prepare engine step of recovery source non-blocking {pull}37573[#37573] (issue: {issue}37174[#37174])
+* Make recovery source send operations non-blocking {pull}37503[#37503] (issue: {issue}37458[#37458])
+* Prepare to make send translog of recovery non-blocking {pull}37458[#37458] (issue: {issue}37291[#37291])
+* Make finalize step of recovery source non-blocking {pull}37388[#37388] (issue: {issue}37291[#37291])
+* Make recovery source partially non-blocking {pull}37291[#37291] (issue: {issue}36195[#36195])
+* Do not mutate RecoveryResponse {pull}37204[#37204] (issue: {issue}37174[#37174])
+* Don't block on peer recovery on the target side {pull}37076[#37076] (issue: {issue}36195[#36195])
+* Reduce recovery time with compress or secure transport {pull}36981[#36981] (issue: {issue}33844[#33844])
+* Translog corruption marker {pull}33415[#33415] (issue: {issue}31389[#31389])
+* Do not wait for advancement of checkpoint in recovery {pull}39006[#39006] (issues: {issue}38949[#38949], {issue}39000[#39000])
+* Exposed engine must include all operations below global checkpoint during rollback {pull}36159[#36159] (issue: {issue}32867[#32867])
+
+Rollup::
+* Add non-X-Pack centric rollup endpoints {pull}36383[#36383] (issues: {issue}35958[#35958], {issue}35962[#35962])
+* Add more diagnostic stats to job {pull}35471[#35471]
+* Add `wait_for_completion` option to StopRollupJob API {pull}34811[#34811] (issue: {issue}34574[#34574])
+* Replace the TreeMap in the composite aggregation {pull}36675[#36675]
+
+Scripting::
+* Update joda compat methods to use compat class {pull}36654[#36654]
+* [Painless] Add def to boxed type casts {pull}36506[#36506]
+
+Settings::
+* Add user-defined cluster metadata {pull}33325[#33325] (issue: {issue}33220[#33220]) (see the sketch after this list)
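+
+As context for the user-defined cluster metadata entry above: a minimal, hypothetical sketch that stores an arbitrary `cluster.metadata.*` value via the cluster settings API, assuming a node on localhost:9200; the key and value are illustrative only.
+
+[source,java]
+----
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RestClient;
+
+public class ClusterMetadataExample {
+    public static void main(String[] args) throws Exception {
+        try (RestClient client = RestClient.builder(
+                new HttpHost("localhost", 9200, "http")).build()) {
+            // Persist a free-form marker under the cluster.metadata.* namespace.
+            Request request = new Request("PUT", "/_cluster/settings");
+            request.setJsonEntity("{\"persistent\":"
+                + "{\"cluster.metadata.administrator\":\"ops@example.com\"}}");
+            System.out.println(client.performRequest(request).getStatusLine());
+        }
+    }
+}
+----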
+
+Search::
+* Make limit on number of expanded fields configurable {pull}35284[#35284] (issues: {issue}26541[#26541], {issue}34778[#34778])
+* Search: Simplify SingleFieldsVisitor {pull}34052[#34052]
+* Don't count hits via the collector if the hit count can be computed from index stats. {pull}33701[#33701]
+* Limit the number of concurrent requests per node {pull}31206[#31206] (issue: {issue}31192[#31192])
+* Default max concurrent search req. numNodes * 5 {pull}31171[#31171] (issues: {issue}30783[#30783], {issue}30994[#30994])
+* Change ScriptException status to 400 (bad request) {pull}30861[#30861] (issue: {issue}12315[#12315])
+* Change default value to true for transpositions parameter of fuzzy query {pull}26901[#26901]
+* Introducing "took" time (in ms) for `_msearch` {pull}23767[#23767] (issue: {issue}23131[#23131])
+* Add copy constructor to SearchRequest {pull}36641[#36641] (issue: {issue}32125[#32125])
+* Add raw sort values to SearchSortValues transport serialization {pull}36617[#36617] (issue: {issue}32125[#32125])
+* Add sort and collapse info to SearchHits transport serialization {pull}36555[#36555] (issue: {issue}32125[#32125])
+* Add default methods to DocValueFormat {pull}36480[#36480]
+* Respect indices options on _msearch {pull}35887[#35887]
+* Allow efficient can_match phases on frozen indices {pull}35431[#35431] (issues: {issue}34352[#34352], {issue}34357[#34357])
+* Add a new query type - ScriptScoreQuery {pull}34533[#34533] (issues: {issue}23850[#23850], {issue}27588[#27588], {issue}30303[#30303])
+* Tie break on cluster alias when merging shard search failures {pull}38715[#38715] (issue: {issue}38672[#38672])
+* Add finalReduce flag to SearchRequest {pull}38104[#38104] (issues: {issue}37000[#37000], {issue}37838[#37838])
+* Streamline skip_unavailable handling {pull}37672[#37672] (issue: {issue}32125[#32125])
+* Expose sequence number and primary terms in search responses {pull}37639[#37639]
+* Add support for merging multiple search responses into one {pull}37566[#37566] (issue: {issue}32125[#32125])
+* Allow field types to optimize phrase prefix queries {pull}37436[#37436] (issue: {issue}31921[#31921])
+* Add support for providing absolute start time to SearchRequest {pull}37142[#37142] (issue: {issue}32125[#32125])
+* Ensure that local cluster alias is never treated as remote {pull}37121[#37121] (issues: {issue}32125[#32125], {issue}36997[#36997])
+* [API] spelling: cacheable {pull}37047[#37047] (issue: {issue}37035[#37035])
+* Add ability to suggest shard_size on coord node rewrite {pull}37017[#37017] (issues: {issue}32125[#32125], {issue}36997[#36997], {issue}37000[#37000])
+* Skip final reduction if SearchRequest holds a cluster alias {pull}37000[#37000] (issues: {issue}32125[#32125], {issue}36997[#36997])
+* Add support for local cluster alias to SearchRequest {pull}36997[#36997] (issue: {issue}32125[#32125])
+* Use SearchRequest copy constructor in ExpandSearchPhase {pull}36772[#36772] (issue: {issue}36641[#36641])
+* Avoid BytesRef's copying in ScriptDocValues's Strings {pull}29581[#29581] (issue: {issue}29567[#29567])
+
+Security::
+* Make credentials mandatory when launching xpack/migrate {pull}36197[#36197] (issues: {issue}29847[#29847], {issue}33972[#33972])
+* Move CAS operations in TokenService to sequence numbers {pull}38311[#38311] (issues: {issue}10708[#10708], {issue}37872[#37872])
+* Cleanup construction of interceptors {pull}38294[#38294]
+* Add passphrase support to elasticsearch-keystore {pull}37472[#37472] (issue: {issue}32691[#32691])
+* Types removal security index template {pull}39705[#39705] (issue: {issue}38637[#38637])
+* Types removal security index template {pull}39542[#39542] (issue: {issue}38637[#38637])
+
+Snapshot/Restore::
+* Add S3 Setting to Force Path Type Access {pull}34721[#34721] (issue: {issue}31608[#31608])
+* Allow Parallel Restore 
Operations {pull}36397[#36397] +* Repo Creation out of ClusterStateTask {pull}36157[#36157] (issue: {issue}9488[#9488]) +* Add read-only repository verification {pull}35731[#35731] (issue: {issue}35703[#35703]) +* RestoreService should update primary terms when restoring shards of existing indices {pull}38177[#38177] (issue: {issue}33888[#33888]) +* Allow open indices to be restored {pull}37733[#37733] +* Create specific exception for when snapshots are in progress {pull}37550[#37550] (issue: {issue}37541[#37541]) +* Make Atomic Blob Writes Mandatory {pull}37168[#37168] (issues: {issue}37011[#37011], {issue}37066[#37066]) +* Speed up HDFS Repository Writes {pull}37069[#37069] +* Implement Atomic Blob Writes for HDFS Repository {pull}37066[#37066] (issue: {issue}37011[#37011]) +* [API] spelling: repositories {pull}37053[#37053] (issue: {issue}37035[#37035]) +* Use CancellableThreads to Abort {pull}35901[#35901] (issue: {issue}21759[#21759]) +* S3 client encryption {pull}30513[#30513] (issues: {issue}11128[#11128], {issue}16843[#16843]) +* Mark Deleted Snapshot Directories with Tombstones {pull}40228[#40228] (issue: {issue}39852[#39852]) + +Stats:: +* Handle OS pretty name on old OS without OS release {pull}35453[#35453] (issue: {issue}35440[#35440]) + +Store:: +* Add RemoveCorruptedShardDataCommand {pull}32281[#32281] (issues: {issue}31389[#31389], {issue}32279[#32279]) +* Add option to force load term dict into memory {pull}39741[#39741] + +SQL:: +* Introduce support for NULL values {pull}34573[#34573] (issue: {issue}32079[#32079]) +* Extend the ODBC metric by differentiating between 32 and 64bit platforms {pull}36753[#36753] (issue: {issue}36740[#36740]) +* Fix wrong appliance of StackOverflow limit for IN {pull}36724[#36724] (issue: {issue}36592[#36592]) +* Introduce NOW/CURRENT_TIMESTAMP function {pull}36562[#36562] (issue: {issue}36534[#36534]) +* Move requests' parameters to requests JSON body {pull}36149[#36149] (issue: {issue}35992[#35992]) +* Make INTERVAL millis optional {pull}36043[#36043] (issue: {issue}36032[#36032]) +* Implement data type verification for conditionals {pull}35916[#35916] (issue: {issue}35907[#35907]) +* Implement GREATEST and LEAST functions {pull}35879[#35879] (issue: {issue}35878[#35878]) +* Implement null safe equality operator `<=>` {pull}35873[#35873] (issue: {issue}35871[#35871]) +* SYS COLUMNS returns ODBC specific schema {pull}35870[#35870] (issue: {issue}35376[#35376]) +* Polish grammar for intervals {pull}35853[#35853] +* Add filtering to SYS TYPES {pull}35852[#35852] (issue: {issue}35342[#35342]) +* Implement NULLIF(expr1, expr2) function {pull}35826[#35826] (issue: {issue}35818[#35818]) +* Lock down JDBC driver {pull}35798[#35798] (issue: {issue}35437[#35437]) +* Implement NVL(expr1, expr2) {pull}35794[#35794] (issue: {issue}35782[#35782]) +* Implement ISNULL(expr1, expr2) {pull}35793[#35793] (issue: {issue}35781[#35781]) +* Implement IFNULL variant of COALESCE {pull}35762[#35762] (issue: {issue}35749[#35749]) +* XPack FeatureSet functionality {pull}35725[#35725] (issue: {issue}34821[#34821]) +* Perform lazy evaluation of mismatched mappings {pull}35676[#35676] (issues: {issue}35659[#35659], {issue}35675[#35675]) +* Improve validation of unsupported fields {pull}35675[#35675] (issue: {issue}35673[#35673]) +* Move internals from Joda to java.time {pull}35649[#35649] (issue: {issue}35633[#35633]) +* Allow look-ahead resolution of aliases for WHERE clause {pull}38450[#38450] (issue: {issue}29983[#29983]) +* Implement CURRENT_DATE {pull}38175[#38175] 
(issue: {issue}38160[#38160])
+* Generate relevant error message when grouping functions are not used in GROUP BY {pull}38017[#38017] (issue: {issue}37952[#37952])
+* Skip the nested and object field types in case of an ODBC request {pull}37948[#37948] (issue: {issue}37801[#37801])
+* Add protocol tests and remove jdbc_type from drivers response {pull}37516[#37516] (issues: {issue}36635[#36635], {issue}36882[#36882])
+* Remove slightly used meta commands {pull}37506[#37506] (issue: {issue}37409[#37409])
+* Describe aliases as views {pull}37496[#37496] (issue: {issue}37422[#37422])
+* Make `FULL` non-reserved keyword in the grammar {pull}37377[#37377] (issue: {issue}37376[#37376])
+* Use declared source for error messages {pull}37161[#37161]
+* Improve error message when unable to translate to ES query DSL {pull}37129[#37129] (issue: {issue}37040[#37040])
+* [API] spelling: subtract {pull}37055[#37055] (issue: {issue}37035[#37035])
+* [API] spelling: similar {pull}37054[#37054] (issue: {issue}37035[#37035])
+* [API] spelling: input {pull}37048[#37048] (issue: {issue}37035[#37035])
+* Enhance message for PERCENTILE[_RANK] with field as 2nd arg {pull}36933[#36933] (issue: {issue}36903[#36903])
+* Preserve original source for each expression {pull}36912[#36912] (issue: {issue}36894[#36894])
+* Enhance checks for inexact fields {pull}39427[#39427] (issue: {issue}38501[#38501])
+* Change the default precision for CURRENT_TIMESTAMP function {pull}39391[#39391] (issue: {issue}39288[#39288]) (see the sketch after this list)
+* Add "fuzziness" option to QUERY and MATCH function predicates {pull}40529[#40529] (issue: {issue}40495[#40495])
+* Add "validate.properties" property to JDBC's allowed list of settings {pull}39050[#39050] (issue: {issue}38068[#38068])
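+
+As context for the SQL entries above: a minimal, hypothetical sketch that runs a query against the _sql endpoint, exercising the NVL function and an explicit CURRENT_TIMESTAMP precision, assuming a node on localhost:9200; the query text is illustrative only.
+
+[source,java]
+----
+import org.apache.http.HttpHost;
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+public class SqlQueryExample {
+    public static void main(String[] args) throws Exception {
+        try (RestClient client = RestClient.builder(
+                new HttpHost("localhost", 9200, "http")).build()) {
+            // Plain-text output keeps the example easy to read.
+            Request request = new Request("POST", "/_sql");
+            request.addParameter("format", "txt");
+            request.setJsonEntity("{\"query\":"
+                + "\"SELECT NVL(NULL, 'n/a') AS fallback, CURRENT_TIMESTAMP(3) AS ts\"}");
+            Response response = client.performRequest(request);
+            System.out.println(EntityUtils.toString(response.getEntity()));
+        }
+    }
+}
+----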
+
+Suggesters::
+* Remove unused empty constructors from suggestions classes {pull}37295[#37295]
+* [API] spelling: likelihood {pull}37052[#37052] (issue: {issue}37035[#37035])
+
+Task Management::
+* Periodically try to reassign unassigned persistent tasks {pull}36069[#36069] (issue: {issue}35792[#35792])
+* Only require task permissions {pull}35667[#35667] (issue: {issue}35573[#35573])
+* Retry if task can't be written {pull}35054[#35054] (issue: {issue}33764[#33764])
+
+ZenDiscovery::
+* Introduce vote withdrawal {pull}35446[#35446]
+* Add basic Zen1 transport-level BWC {pull}35443[#35443]
+* Add diff-based publishing {pull}35290[#35290]
+* Introduce auto_shrink_voting_configuration setting {pull}35217[#35217]
+* Introduce transport API for cluster bootstrapping {pull}34961[#34961]
+* Reconfigure cluster as its membership changes {pull}34592[#34592] (issue: {issue}33924[#33924])
+* Fail fast on disconnects {pull}34503[#34503]
+* Add storage-layer disruptions to CoordinatorTests {pull}34347[#34347]
+* Add low-level bootstrap implementation {pull}34345[#34345]
+* Gather votes from all nodes {pull}34335[#34335]
+* Add Cluster State Applier {pull}34257[#34257]
+* Add safety phase to CoordinatorTests {pull}34241[#34241]
+* Integrate FollowerChecker with Coordinator {pull}34075[#34075]
+* Integrate LeaderChecker with Coordinator {pull}34049[#34049]
+* Trigger join when active master detected {pull}34008[#34008]
+* Update PeerFinder term on term bump {pull}33992[#33992]
+* Calculate optimal cluster configuration {pull}33924[#33924]
+* Introduce FollowersChecker {pull}33917[#33917]
+* Integrate publication pipeline into Coordinator {pull}33771[#33771]
+* Add DisruptableMockTransport {pull}33713[#33713]
+* Implement basic cluster formation {pull}33668[#33668]
+* Introduce LeaderChecker {pull}33024[#33024]
+* Add leader-side join handling logic {pull}33013[#33013]
+* Add PeerFinder#onFoundPeersUpdated {pull}32939[#32939]
+* Introduce PreVoteCollector {pull}32847[#32847]
+* Introduce ElectionScheduler {pull}32846[#32846]
+* Introduce ElectionScheduler {pull}32709[#32709]
+* Add HandshakingTransportAddressConnector {pull}32643[#32643] (issue: {issue}32246[#32246])
+* Add UnicastConfiguredHostsResolver {pull}32642[#32642] (issue: {issue}32246[#32246])
+* Cluster state publication pipeline {pull}32584[#32584] (issue: {issue}32006[#32006])
+* Introduce gossip-like discovery of master nodes {pull}32246[#32246]
+* Add core coordination algorithm for cluster state publishing {pull}32171[#32171] (issue: {issue}32006[#32006])
+* Add term and config to cluster state {pull}32100[#32100] (issue: {issue}32006[#32006])
+* Add discovery types to cluster stats {pull}36442[#36442]
+* Introduce `zen2` discovery type {pull}36298[#36298]
+* Persist cluster states the old way on non-master-eligible nodes {pull}36247[#36247]
+* Storage layer WriteStateException propagation {pull}36052[#36052]
+* Implement Tombstone REST APIs {pull}36007[#36007]
+* Update default for USE_ZEN2 to true {pull}35998[#35998]
+* Add warning if cluster fails to form fast enough {pull}35993[#35993]
+* Allow Setting a List of Bootstrap Nodes to Wait for {pull}35847[#35847]
+* VotingTombstone class {pull}35832[#35832]
+* PersistedState interface implementation {pull}35819[#35819]
+* Support rolling upgrades from Zen1 {pull}35737[#35737]
+* Add lag detector {pull}35685[#35685]
+* Move ClusterState fields to be persisted to ClusterState.MetaData {pull}35625[#35625]
+* Introduce ClusterBootstrapService {pull}35488[#35488]
+* Add elasticsearch-node detach-cluster tool {pull}37979[#37979]
+* Deprecate minimum_master_nodes {pull}37868[#37868]
+* Step down as master when configured out of voting configuration {pull}37802[#37802] (issue: {issue}37712[#37712])
+* Enforce cluster UUIDs {pull}37775[#37775]
+* Bubble exceptions up in ClusterApplierService {pull}37729[#37729]
+* Use m_m_nodes from Zen1 master for Zen2 bootstrap {pull}37701[#37701]
+* Add tool elasticsearch-node unsafe-bootstrap {pull}37696[#37696]
+* Report terms and version if cluster does not form {pull}37473[#37473]
+* Bootstrap a Zen2 cluster once quorum is discovered {pull}37463[#37463]
+* Zen2: Add join validation {pull}37203[#37203]
+* Publish cluster states in chunks {pull}36973[#36973]
+
+
+
+[[bug-7.0.0]]
+[float]
+=== Bug fixes
+
+Aggregations::
+* Fix InternalAutoDateHistogram reproducible failure {pull}32723[#32723] (issue: {issue}32215[#32215])
+* Fix MultiValuesSourceFieldConfig toXContent {pull}36525[#36525] (issue: {issue}36474[#36474])
+* Cache the score of the parent document in the nested agg {pull}36019[#36019] (issues: {issue}34555[#34555], {issue}35985[#35985])
+* Correct implemented interface of ParsedReverseNested {pull}35455[#35455] (issue: {issue}35449[#35449])
+* Handle IndexOrDocValuesQuery in composite aggregation {pull}35392[#35392]
+* Don't load global ordinals with the `map` execution_hint {pull}37833[#37833] (issue: {issue}37705[#37705])
+* Issue #37303 - Invalid 
variance fix {pull}37384[#37384] (issue: {issue}37303[#37303]) +* Skip sibling pipeline aggregators reduction during non-final reduce {pull}40101[#40101] (issue: {issue}40059[#40059]) +* Extend nextDoc to delegate to the wrapped doc-value iterator for date_nanos {pull}39176[#39176] (issue: {issue}39107[#39107]) +* Only create MatrixStatsResults on final reduction {pull}38130[#38130] (issue: {issue}37587[#37587]) + +Allocation:: +* Fix _host based require filters {pull}38173[#38173] +* ALLOC: Fail Stale Primary Alloc. Req. without Data {pull}37226[#37226] (issue: {issue}37098[#37098]) + +Analysis:: +* Close #26771: beider_morse phonetic encoder failure when languageset unspecified {pull}26848[#26848] (issue: {issue}26771[#26771]) +* Fix PreConfiguredTokenFilters getSynonymFilter() implementations {pull}38839[#38839] (issue: {issue}38793[#38793]) + +Audit:: +* Fix origin.type for connection_* events {pull}36410[#36410] +* Fix IndexAuditTrail rolling restart on rollover edge {pull}35988[#35988] (issue: {issue}33867[#33867]) +* Fix NPE in Logfile Audit Filter {pull}38120[#38120] (issue: {issue}38097[#38097]) +* LoggingAuditTrail correctly handle ReplicatedWriteRequest {pull}39925[#39925] (issue: {issue}39555[#39555]) + +Authorization:: +* Empty GetAliases authorization fix {pull}34444[#34444] (issue: {issue}31952[#31952]) + +Authentication:: +* Fix kerberos setting registration {pull}35986[#35986] (issues: {issue}30241[#30241], {issue}35942[#35942]) +* Add support for Kerberos V5 Oid {pull}35764[#35764] (issue: {issue}34763[#34763]) +* Enhance parsing of StatusCode in SAML Responses {pull}38628[#38628] +* Limit token expiry to 1 hour maximum {pull}38244[#38244] +* Fix expired token message in Exception header {pull}37196[#37196] +* Fix NPE in CachingUsernamePasswordRealm {pull}36953[#36953] (issue: {issue}36951[#36951]) +* Allow non super users to create API keys {pull}40028[#40028] (issue: {issue}40029[#40029]) +* Use consistent view of realms for authentication {pull}38815[#38815] (issue: {issue}30301[#30301]) +* Correct authenticate response for API key {pull}39684[#39684] +* Fix security index auto-create and state recovery race {pull}39582[#39582] + +Build:: +* Use explicit deps on test tasks for check {pull}36325[#36325] +* Fix jdbc jar pom to not include deps {pull}36036[#36036] (issue: {issue}32014[#32014]) +* Fix official plugins list {pull}35661[#35661] (issue: {issue}35623[#35623]) + +CCR:: +* Fix follow stats API's follower index filtering feature {pull}36647[#36647] +* AutoFollowCoordinator should tolerate that auto follow patterns may be removed {pull}35945[#35945] (issue: {issue}35937[#35937]) +* Only auto follow indices when all primary shards have started {pull}35814[#35814] (issue: {issue}35480[#35480]) +* Avoid NPE in follower stats when no tasks metadata {pull}35802[#35802] +* Fix the names of CCR stats endpoints in usage API {pull}35438[#35438] +* Prevent CCR recovery from missing documents {pull}38237[#38237] +* Fix file reading in ccr restore service {pull}38117[#38117] +* Correct argument names in update mapping/settings from leader {pull}38063[#38063] +* Ensure changes requests return the latest mapping version {pull}37633[#37633] +* Do not set fatal exception when shard follow task is stopped. {pull}37603[#37603] +* Add fatal_exception field for ccr stats in monitoring mapping {pull}37563[#37563] +* Do not add index event listener if CCR disabled {pull}37432[#37432] +* When removing an AutoFollower also mark it as removed. 
{pull}37402[#37402] (issue: {issue}36761[#36761]) +* Make shard follow tasks more resilient for restarts {pull}37239[#37239] (issue: {issue}37231[#37231]) +* Resume follow Api should not require a request body {pull}37217[#37217] (issue: {issue}37022[#37022]) +* Report error if auto follower tries auto follow a leader index with soft deletes disabled {pull}36886[#36886] (issue: {issue}33007[#33007]) +* Remote cluster license checker and no license info. {pull}36837[#36837] (issue: {issue}36815[#36815]) +* Make CCR resilient against missing remote cluster connections {pull}36682[#36682] (issues: {issue}36255[#36255], {issue}36667[#36667]) +* AutoFollowCoordinator and follower index already created {pull}36540[#36540] (issue: {issue}33007[#33007]) +* Safe publication of AutoFollowCoordinator {pull}40153[#40153] (issue: {issue}38560[#38560]) +* Enable reading auto-follow patterns from x-content {pull}40130[#40130] (issue: {issue}40128[#40128]) +* Stop auto-followers on shutdown {pull}40124[#40124] +* Protect against the leader index being removed {pull}39351[#39351] (issue: {issue}39308[#39308]) +* Handle the fact that `ShardStats` instance may have no commit or seqno stats {pull}38782[#38782] (issue: {issue}38779[#38779]) +* Fix LocalIndexFollowingIT#testRemoveRemoteConnection() test {pull}38709[#38709] (issue: {issue}38695[#38695]) +* Fix shard follow task startup error handling {pull}39053[#39053] (issue: {issue}38779[#38779]) +* Filter out upgraded version index settings when starting index following {pull}38838[#38838] (issue: {issue}38835[#38835]) + +Circuit Breakers:: +* Modify `BigArrays` to take name of circuit breaker {pull}36461[#36461] (issue: {issue}31435[#31435]) + +Core:: +* Fix CompositeBytesReference#slice to not throw AIOOBE with legal offsets. {pull}35955[#35955] (issue: {issue}35950[#35950]) +* Suppress CachedTimeThread in hot threads output {pull}35558[#35558] (issue: {issue}23175[#23175]) +* Upgrade to Joda 2.10.1 {pull}35410[#35410] (issue: {issue}33749[#33749]) + +CRUD:: +* Fix Reindex from remote query logic {pull}36908[#36908] +* Synchronize WriteReplicaResult callbacks {pull}36770[#36770] +* Cascading primary failure lead to MSU too low {pull}40249[#40249] +* Store Pending Deletions Fix {pull}40345[#40345] (issue: {issue}40249[#40249]) +* ShardBulkAction ignore primary response on primary {pull}38901[#38901] + +Cluster Coordination:: +* Fix node tool cleanup {pull}39389[#39389] +* Avoid serialising state if it was already serialised {pull}39179[#39179] +* Do not perform cleanup if Manifest write fails with dirty exception {pull}40519[#40519] (issue: {issue}39077[#39077]) +* Cache compressed cluster state size {pull}39827[#39827] (issue: {issue}39806[#39806]) +* Drop node if asymmetrically partitioned from master {pull}39598[#39598] +* Fixing the custom object serialization bug in diffable utils. 
{pull}39544[#39544] +* Clean GatewayAllocator when stepping down as master {pull}38885[#38885] + +Distributed:: +* Combine the execution of an exclusive replica operation with primary term update {pull}36116[#36116] (issue: {issue}35850[#35850]) +* ActiveShardCount should not fail when closing the index {pull}35936[#35936] +* TransportVerifyShardBeforeCloseAction should force a flush {pull}38401[#38401] (issues: {issue}33888[#33888], {issue}37961[#37961]) +* Fix limit on retaining sequence number {pull}37992[#37992] (issue: {issue}37165[#37165]) +* Close Index API should force a flush if a sync is needed {pull}37961[#37961] (issues: {issue}33888[#33888], {issue}37426[#37426]) +* Force Refresh Listeners when Acquiring all Operation Permits {pull}36835[#36835] +* Replaced the word 'shards' with 'replicas' in an error message. (#36234) {pull}36275[#36275] (issue: {issue}36234[#36234]) +* Ignore waitForActiveShards when syncing leases {pull}39224[#39224] (issue: {issue}39089[#39089]) +* Fix synchronization in LocalCheckpointTracker#contains {pull}38755[#38755] (issues: {issue}33871[#33871], {issue}38633[#38633]) +* Enforce retention leases require soft deletes {pull}39922[#39922] (issue: {issue}39914[#39914]) +* Treat TransportService stopped error as node is closing {pull}39800[#39800] (issue: {issue}39584[#39584]) +* Use cause to determine if node with primary is closing {pull}39723[#39723] (issue: {issue}39584[#39584]) +* Don’t ack if unable to remove failing replica {pull}39584[#39584] (issue: {issue}39467[#39467]) +* Fix NPE on Stale Index in IndicesService {pull}38891[#38891] (issue: {issue}38845[#38845]) + +Engine:: +* Set Lucene version upon index creation. {pull}36038[#36038] (issue: {issue}33826[#33826]) +* Wrap can_match reader with ElasticsearchDirectoryReader {pull}35857[#35857] +* Copy checkpoint atomically when rolling generation {pull}35407[#35407] +* Subclass NIOFSDirectory instead of using FileSwitchDirectory {pull}37140[#37140] (issues: {issue}36668[#36668], {issue}37111[#37111]) +* Bubble up exception when processing NoOp {pull}39338[#39338] (issue: {issue}38898[#38898]) +* ReadOnlyEngine should update translog recovery state information {pull}39238[#39238] +* Advance max_seq_no before add operation to Lucene {pull}38879[#38879] (issue: {issue}31629[#31629]) + +Features/Features:: +* Only count some fields types for deprecation check {pull}40166[#40166] +* Deprecation check for indices with very large numbers of fields {pull}39869[#39869] (issue: {issue}39851[#39851]) + +Features/ILM:: +* Preserve ILM operation mode when creating new lifecycles {pull}38134[#38134] (issues: {issue}38229[#38229], {issue}38230[#38230]) +* Retry ILM steps that fail due to SnapshotInProgressException {pull}37624[#37624] (issues: {issue}37541[#37541], {issue}37552[#37552]) +* Remove `indexing_complete` when removing policy {pull}36620[#36620] +* Handle failure to release retention leases in ILM {pull}39281[#39281] (issue: {issue}39181[#39181]) +* Correct ILM metadata minimum compatibility version {pull}40569[#40569] (issue: {issue}40565[#40565]) +* Handle null retention leases in WaitForNoFollowersStep {pull}40477[#40477] +* Allow ILM to stop if indices have nonexistent policies {pull}40820[#40820] (issue: {issue}40824[#40824]) + +Features/Indices APIs:: +* Validate top-level keys for create index request (#23755) {pull}23869[#23869] (issue: {issue}23755[#23755]) +* Reject delete index requests with a body {pull}37501[#37501] (issue: {issue}8217[#8217]) +* Fix duplicate phrase in shrink/split 
error message {pull}36734[#36734] (issue: {issue}36729[#36729]) +* Get Aliases with wildcard exclusion expression {pull}34230[#34230] (issues: {issue}33518[#33518], {issue}33805[#33805], {issue}34144[#34144]) + +Features/Ingest:: +* Fix Deprecation Warning in Script Proc. {pull}32407[#32407] +* Support unknown fields in ingest pipeline map configuration {pull}38352[#38352] (issue: {issue}36938[#36938]) +* Ingest node - user_agent, move device parsing to an object {pull}38115[#38115] (issues: {issue}37329[#37329], {issue}38094[#38094]) +* Fix on_failure with Drop processor {pull}36686[#36686] (issue: {issue}36151[#36151]) +* Support default pipelines + bulk upserts {pull}36618[#36618] (issue: {issue}36219[#36219]) +* Ingest ingest then create index {pull}39607[#39607] (issues: {issue}32758[#32758], {issue}32786[#32786], {issue}36545[#36545]) + +Features/Java High Level REST Client:: +* Drop extra level from user parser {pull}34932[#34932] +* Update IndexTemplateMetaData to allow unknown fields {pull}38448[#38448] (issue: {issue}36938[#36938]) +* `if_seq_no` and `if_primary_term` parameters aren't wired correctly in REST Client's CRUD API {pull}38411[#38411] +* Update Rollup Caps to allow unknown fields {pull}38339[#38339] (issue: {issue}36938[#36938]) +* Fix ILM explain response to allow unknown fields {pull}38054[#38054] (issue: {issue}36938[#36938]) +* Fix ILM status to allow unknown fields {pull}38043[#38043] (issue: {issue}36938[#36938]) +* Fix ILM Lifecycle Policy to allow unknown fields {pull}38041[#38041] (issue: {issue}36938[#36938]) +* Update authenticate to allow unknown fields {pull}37713[#37713] (issue: {issue}36938[#36938]) +* Update verify repository to allow unknown fields {pull}37619[#37619] (issue: {issue}36938[#36938]) +* Update get users to allow unknown fields {pull}37593[#37593] (issue: {issue}36938[#36938]) +* Update Execute Watch to allow unknown fields {pull}37498[#37498] (issue: {issue}36938[#36938]) +* Update Put Watch to allow unknown fields {pull}37494[#37494] (issue: {issue}36938[#36938]) +* Update Delete Watch to allow unknown fields {pull}37435[#37435] (issue: {issue}36938[#36938]) +* Fix rest reindex test for IPv4 addresses {pull}37310[#37310] +* Fix weighted_avg parser not found for RestHighLevelClient {pull}37027[#37027] (issue: {issue}36861[#36861]) + +Features/Java Low Level REST Client:: +* Remove I/O pool blocking sniffing call from onFailure callback, add some logic around host exclusion {pull}27985[#27985] (issue: {issue}27984[#27984]) +* Fix potential IllegalCapacityException in LLRC when selecting nodes {pull}37821[#37821] + +Features/Monitoring:: +* Allow built-in monitoring_user role to call GET _xpack API {pull}38060[#38060] (issue: {issue}37970[#37970]) +* Don't emit deprecation warnings on calls to the monitoring bulk API. 
{pull}39805[#39805] (issue: {issue}39336[#39336])
+
+Features/Watcher::
+* Ignore system locale/timezone in croneval CLI tool {pull}33215[#33215]
+* Support merge nested Map in list for JIRA configurations {pull}37634[#37634] (issue: {issue}30068[#30068])
+* Watcher accounts constructed lazily {pull}36656[#36656]
+* Ensures watch definitions are valid json {pull}30692[#30692] (issue: {issue}29746[#29746])
+* Use non-ILM template setting up watch history template & ILM disabled {pull}39325[#39325] (issue: {issue}38805[#38805])
+* Only flush Watcher's bulk processor if Watcher is enabled {pull}38803[#38803] (issue: {issue}38798[#38798])
+* Fix Watcher stats class cast exception {pull}39821[#39821] (issue: {issue}39780[#39780])
+* Use any index specified by .watches for Watcher {pull}39541[#39541] (issue: {issue}39478[#39478])
+* Resolve concurrency with watcher trigger service {pull}39092[#39092] (issue: {issue}39087[#39087])
+* Metric on watcher stats is a list not an enum {pull}39114[#39114]
+
+Geo::
+* Test `GeoShapeQueryTests#testPointsOnly` fails {pull}27454[#27454]
+* More robust handling of ignore_malformed in geoshape parsing {pull}35603[#35603] (issues: {issue}34047[#34047], {issue}34498[#34498])
+* Better handling of malformed geo_points {pull}35554[#35554] (issue: {issue}35419[#35419])
+* Enables coerce support in WKT polygon parser {pull}35414[#35414] (issue: {issue}35059[#35059])
+* Fix GeoHash PrefixTree BWC {pull}38584[#38584] (issue: {issue}38494[#38494])
+* Do not normalize the longitude with value -180 for Lucene shapes {pull}37299[#37299] (issue: {issue}37297[#37297])
+* Geo Point parse error fix {pull}40447[#40447] (issue: {issue}17617[#17617])
+
+Highlighting::
+* Bug fix for AnnotatedTextHighlighter - port of 39525 {pull}39750[#39750] (issue: {issue}39525[#39525])
+
+Infra/Core::
+* Ensure shard is refreshed once it's inactive {pull}27559[#27559] (issue: {issue}27500[#27500])
+* Bubble-up exceptions from scheduler {pull}38317[#38317] (issue: {issue}38014[#38014])
+* Revert back to joda's multi date formatters {pull}36814[#36814] (issues: {issue}36447[#36447], {issue}36602[#36602])
+* Propagate Errors in executors to uncaught exception handler {pull}36137[#36137] (issue: {issue}28667[#28667])
+* Correct name of basic_date_time_no_millis {pull}39367[#39367]
+* Allow single digit milliseconds in strict date parsing {pull}40676[#40676] (issue: {issue}40403[#40403])
+* Parse composite patterns using ClassicFormat.parseObject {pull}40100[#40100] (issue: {issue}39916[#39916])
+* Bat scripts to work with JAVA_HOME with parentheses {pull}39712[#39712] (issues: {issue}30606[#30606], {issue}33405[#33405], {issue}38578[#38578], {issue}38624[#38624])
+* Change licence expiration date pattern {pull}39681[#39681] (issue: {issue}39136[#39136])
+* Fix DateFormatters.parseMillis when no timezone is given {pull}39100[#39100] (issue: {issue}39067[#39067])
+* Don't close caches while there might still be in-flight requests.
{pull}38958[#38958] (issue: {issue}37117[#37117])
+
+Infra/Packaging::
+* Remove NOREPLACE for /etc/elasticsearch in rpm and deb {pull}37839[#37839]
+* Packaging: Update marker used to allow ELASTIC_PASSWORD {pull}37243[#37243] (issue: {issue}37240[#37240])
+* Remove permission editing in postinst {pull}37242[#37242] (issue: {issue}37143[#37143])
+* Some elasticsearch-cli tools could not be run from outside ES_HOME {pull}39937[#39937]
+* Obsolete pre 7.0 noarch package in rpm {pull}39472[#39472] (issue: {issue}39414[#39414])
+* Suppress error message when `/proc/sys/vm/max_map_count` does not exist. {pull}35933[#35933]
+* Use TAR instead of DOCKER build type before 6.7.0 {pull}40723[#40723] (issues: {issue}39378[#39378], {issue}40511[#40511])
+* Source additional files correctly in elasticsearch-cli {pull}40890[#40890] (issue: {issue}40889[#40889])
+
+Infra/REST API::
+* Reject all requests that have an unconsumed body {pull}37504[#37504] (issues: {issue}30792[#30792], {issue}37501[#37501], {issue}8217[#8217])
+* Fix #38623 remove xpack namespace REST API {pull}38625[#38625]
+* Remove the "xpack" namespace from the REST API {pull}38623[#38623]
+* Update spec files that erroneously documented parts as optional {pull}39122[#39122]
+* ilm.explain_lifecycle documents human again {pull}39113[#39113]
+* Index on rollup.rollup_search.json is a list {pull}39097[#39097]
+
+Infra/Scripting::
+* Fix Painless void return bug {pull}38046[#38046]
+* Correct bug in ScriptDocValues {pull}40488[#40488]
+
+Infra/Settings::
+* Change format how settings represent lists / array {pull}26723[#26723]
+* Fix setting by time unit {pull}37192[#37192]
+* Fix handling of fractional byte size value settings {pull}37172[#37172]
+* Fix handling of fractional time value settings {pull}37171[#37171]
+
+Infra/Transport API::
+* Remove version read/write logic in Verify Response {pull}30879[#30879] (issue: {issue}30807[#30807])
+* Enable muted Repository test {pull}30875[#30875] (issue: {issue}30807[#30807])
+* Bad regex in CORS settings should throw a nicer error {pull}29108[#29108]
+
+Index APIs::
+* Raise a 404 exception when document source is not found (#33384) {pull}34083[#34083] (issue: {issue}33384[#33384])
+
+Ingest::
+* Support default pipeline through an alias {pull}36231[#36231] (issue: {issue}35817[#35817])
+
+License::
+* Update versions for start_trial after backport {pull}30218[#30218] (issue: {issue}30135[#30135])
+* Do not serialize basic license exp in x-pack info {pull}30848[#30848]
+
+Machine Learning::
+* Interrupt Grok in file structure finder timeout {pull}36588[#36588]
+* Prevent stack overflow while copying ML jobs and datafeeds {pull}36370[#36370] (issue: {issue}36360[#36360])
+* Adjust file structure finder parser config {pull}35935[#35935]
+* Fix find_file_structure NPE with should_trim_fields {pull}35465[#35465] (issue: {issue}35462[#35462])
+* Prevent notifications being created on deletion of a non existent job {pull}35337[#35337] (issues: {issue}34058[#34058], {issue}35336[#35336])
+* Clear Job#finished_time
when it is opened (#32605) {pull}32755[#32755] +* Fix thread leak when waiting for job flush (#32196) {pull}32541[#32541] (issue: {issue}32196[#32196]) +* Fix CPoissonMeanConjugate sampling error. {ml-pull}335[#335] +* Report index unavailable instead of waiting for lazy node {pull}38423[#38423] +* Fix error race condition on stop _all datafeeds and close _all jobs {pull}38113[#38113] (issue: {issue}37959[#37959]) +* Update ML results mappings on process start {pull}37706[#37706] (issue: {issue}37607[#37607]) +* Prevent submit after autodetect worker is stopped {pull}37700[#37700] (issue: {issue}37108[#37108]) +* Fix ML datafeed CCS with wildcarded cluster name {pull}37470[#37470] (issue: {issue}36228[#36228]) +* Update error message for process update {pull}37363[#37363] +* Wait for autodetect to be ready in the datafeed {pull}37349[#37349] (issues: {issue}36810[#36810], {issue}37227[#37227]) +* Stop datafeeds running when their jobs are stale {pull}37227[#37227] (issue: {issue}36810[#36810]) +* Order GET job stats response by job id {pull}36841[#36841] (issue: {issue}36683[#36683]) +* Make GetJobStats work with arbitrary wildcards and groups {pull}36683[#36683] (issue: {issue}34745[#34745]) +* Fix datafeed skipping first bucket after lookback when aggs are … {pull}39859[#39859] (issue: {issue}39842[#39842]) +* Refactoring lazy query and agg parsing {pull}39776[#39776] (issue: {issue}39528[#39528]) +* Stop the ML memory tracker before closing node {pull}39111[#39111] (issue: {issue}37117[#37117]) +* Scrolling datafeed should clear scroll contexts on error {pull}40773[#40773] (issue: {issue}40772[#40772]) + +Mapping:: +* Ensure that field aliases cannot be used in multi-fields. {pull}32219[#32219] +* Treat put-mapping calls with `_doc` as a top-level key as typed calls. {pull}38032[#38032] +* Correct deprec log in RestGetFieldMappingAction {pull}37843[#37843] (issue: {issue}37667[#37667]) +* Restore a noop _all metadata field for 6x indices {pull}37808[#37808] (issue: {issue}37429[#37429]) +* Make sure PutMappingRequest accepts content types other than JSON. {pull}37720[#37720] +* Make sure to use the resolved type in DocumentMapperService#extractMappings. {pull}37451[#37451] (issue: {issue}36811[#36811]) +* Improve Precision for scaled_float {pull}37169[#37169] (issue: {issue}32570[#32570]) +* Make sure to accept empty unnested mappings in create index requests. {pull}37089[#37089] +* Stop automatically nesting mappings in index creation requests. {pull}36924[#36924] +* Rewrite SourceToParse with resolved docType {pull}36921[#36921] (issues: {issue}35790[#35790], {issue}36769[#36769]) +* Optimise rejection of out-of-range `long` values {pull}40325[#40325] (issues: {issue}26137[#26137], {issue}40323[#40323]) +* Make sure to reject mappings with type _doc when include_type_name is false. 
{pull}38270[#38270] (issue: {issue}38266[#38266])
+
+Network::
+* Adjust SSLDriver behavior for JDK11 changes {pull}32145[#32145] (issues: {issue}32122[#32122], {issue}32144[#32144])
+* Netty4SizeHeaderFrameDecoder error {pull}31057[#31057]
+* Fix memory leak in http pipelining {pull}30815[#30815] (issue: {issue}30801[#30801])
+* Fix issue with finishing handshake in ssl driver {pull}30580[#30580]
+* Do not resolve addresses in remote connection info {pull}36671[#36671] (issue: {issue}35658[#35658])
+* Always compress based on the settings {pull}36522[#36522] (issue: {issue}36399[#36399])
+* http.publish_host Should Contain CNAME {pull}32806[#32806] (issue: {issue}22029[#22029])
+* Add TRACE, CONNECT, and PATCH http methods {pull}31035[#31035] (issue: {issue}31017[#31017])
+* Transport client: Don't validate node in handshake {pull}30737[#30737] (issue: {issue}30141[#30141])
+* Remove potential nio selector leak {pull}27825[#27825]
+* Fix issue where the incorrect buffers are written {pull}27695[#27695] (issue: {issue}27551[#27551])
+* Do not set SO_LINGER on server channels {pull}26997[#26997]
+* Do not set SO_LINGER to 0 when not shutting down {pull}26871[#26871] (issue: {issue}26764[#26764])
+* Release pipelined http responses on close {pull}26226[#26226]
+* Reload SSL context on file change for LDAP {pull}36937[#36937] (issues: {issue}30509[#30509], {issue}36923[#36923])
+
+Packaging::
+* Fix error message when package install fails due to missing Java {pull}36077[#36077] (issue: {issue}31845[#31845])
+* Add missing entries to conffiles {pull}35810[#35810] (issue: {issue}35691[#35691])
+
+Plugins::
+* Ensure that azure stream has socket privileges {pull}28751[#28751] (issue: {issue}28662[#28662])
+
+Ranking::
+* QueryRescorer should keep the window size when rewriting {pull}36836[#36836]
+
+Recovery::
+* Register ResyncTask.Status as a NamedWriteable {pull}36610[#36610]
+* RecoveryMonitor#lastSeenAccessTime should be volatile {pull}36781[#36781]
+* Create retention leases file during recovery {pull}39359[#39359] (issue: {issue}37165[#37165])
+* Recover peers from translog, ignoring soft deletes {pull}38904[#38904] (issue: {issue}37165[#37165])
+* Retain history for peer recovery using leases {pull}38855[#38855]
+* Resync should not send operations without sequence number {pull}40433[#40433]
+
+Rollup::
+* Fix rollup search statistics {pull}36674[#36674]
+* Fix Rollup's metadata parser {pull}36791[#36791] (issue: {issue}36726[#36726])
+* Remove timezone validation on rollup range queries {pull}40647[#40647]
+* Rollup ignores time_zone on date histogram {pull}40844[#40844]
+
+Scripting::
+* Properly support no-offset date formatting {pull}36316[#36316] (issue: {issue}36306[#36306])
+* [Painless] Generate Bridge Methods {pull}36097[#36097]
+* Fix serialization bug in painless execute api request {pull}36075[#36075] (issue: {issue}36050[#36050])
+* Actually add joda time back to whitelist {pull}35965[#35965] (issue: {issue}35915[#35915])
+* Add back joda to whitelist {pull}35915[#35915] (issue: {issue}35913[#35913])
+
+Settings::
+* Correctly Identify Noop Updates {pull}36560[#36560] (issue: {issue}36496[#36496])
+
+Search::
+* Ensure realtime
`_get` and `_termvectors` don't run on the network thread {pull}33814[#33814] (issue: {issue}27500[#27500])
+* [bug] fuzziness custom auto {pull}33462[#33462] (issue: {issue}33454[#33454])
+* Fix inner hits retrieval when stored fields are disabled (_none_) {pull}33018[#33018] (issue: {issue}32941[#32941])
+* Set maxScore for empty TopDocs to NaN rather than 0 {pull}32938[#32938]
+* Handle leniency for cross_fields type in multi_match query {pull}27045[#27045] (issue: {issue}23210[#23210])
+* Raise IllegalArgumentException instead if query validation failed {pull}26811[#26811] (issue: {issue}26799[#26799])
+* Inner hits fail to propagate doc-value format. {pull}36310[#36310]
+* Fix custom AUTO issue with Fuzziness#toXContent {pull}35807[#35807] (issue: {issue}33462[#33462])
+* Fix analyzed prefix query in query_string {pull}35756[#35756] (issue: {issue}31702[#31702])
+* Fix problem with MatchNoDocsQuery in disjunction queries {pull}35726[#35726] (issue: {issue}34708[#34708])
+* Fix phrase_slop in query_string query {pull}35533[#35533] (issue: {issue}35125[#35125])
+* Add a More Like This query routing requirement check (#29678) {pull}33974[#33974]
+* Look up connection using the right cluster alias when releasing contexts {pull}38570[#38570]
+* Fix fetch source option in expand search phase {pull}37908[#37908] (issue: {issue}23829[#23829])
+* Change `rational` to `saturation` in script_score {pull}37766[#37766] (issue: {issue}37714[#37714])
+* Throw if two inner_hits have the same name {pull}37645[#37645] (issue: {issue}37584[#37584])
+* Ensure either success or failure path for SearchOperationListener is called {pull}37467[#37467] (issue: {issue}37185[#37185])
+* `query_string` should use indexed prefixes {pull}36895[#36895]
+* Avoid duplicate types deprecation messages in search-related APIs. {pull}36802[#36802]
+* Serialize top-level pipeline aggs as part of InternalAggregations {pull}40177[#40177] (issues: {issue}40059[#40059], {issue}40101[#40101])
+* CCS: Skip empty search hits when minimizing round-trips {pull}40098[#40098] (issues: {issue}32125[#32125], {issue}40067[#40067])
+* CCS: Disable minimizing round-trips when dfs is requested {pull}40044[#40044] (issue: {issue}32125[#32125])
+* Fix Fuzziness#asDistance(String) {pull}39643[#39643] (issue: {issue}39614[#39614])
+* Fix alias resolution runtime complexity.
{pull}40263[#40263] (issue: {issue}40248[#40248]) + +Security:: +* Handle 6.4.0+ BWC for Application Privileges {pull}32929[#32929] +* Remove license state listeners on closeables {pull}36308[#36308] (issues: {issue}33328[#33328], {issue}35627[#35627], {issue}35628[#35628]) +* Fix exit code for Security CLI tools {pull}37956[#37956] (issue: {issue}37841[#37841]) +* Fix potential NPE in UsersTool {pull}37660[#37660] +* Remove dynamic objects from security index {pull}40499[#40499] (issue: {issue}35460[#35460]) +* Fix libs:ssl-config project setup {pull}39074[#39074] +* Do not create the missing index when invoking getRole {pull}39039[#39039] + +Snapshot/Restore:: +* Upgrade GCS Dependencies to 1.55.0 {pull}36634[#36634] (issues: {issue}35229[#35229], {issue}35459[#35459]) +* Improve Resilience SnapshotShardService {pull}36113[#36113] (issue: {issue}32265[#32265]) +* Keep SnapshotsInProgress State in Sync with Routing Table {pull}35710[#35710] +* Ensure that gcs client creation is privileged {pull}25938[#25938] (issue: {issue}25932[#25932]) +* Make calls to CloudBlobContainer#exists privileged {pull}25937[#25937] (issue: {issue}25931[#25931]) +* Fix Concurrent Snapshot Ending And Stabilize Snapshot Finalization {pull}38368[#38368] (issue: {issue}38226[#38226]) +* Fix Two Races that Lead to Stuck Snapshots {pull}37686[#37686] (issues: {issue}32265[#32265], {issue}32348[#32348]) +* Fix Race in Concurrent Snapshot Delete and Create {pull}37612[#37612] (issue: {issue}37581[#37581]) +* Streamline S3 Repository- and Client-Settings {pull}37393[#37393] +* Blob store compression fix {pull}39073[#39073] + +SQL:: +* Fix translation of LIKE/RLIKE keywords {pull}36672[#36672] (issues: {issue}36039[#36039], {issue}36584[#36584]) +* Scripting support for casting functions CAST and CONVERT {pull}36640[#36640] (issue: {issue}36061[#36061]) +* Fix translation to painless for conditionals {pull}36636[#36636] (issue: {issue}36631[#36631]) +* Concat should be always not nullable {pull}36601[#36601] (issue: {issue}36169[#36169]) +* Fix MOD() for long and integer arguments {pull}36599[#36599] (issue: {issue}36364[#36364]) +* Fix issue with complex HAVING and GROUP BY ordinal {pull}36594[#36594] (issue: {issue}36059[#36059]) +* Be lenient for tests involving comparison to H2 but strict for csv spec tests {pull}36498[#36498] (issue: {issue}36483[#36483]) +* Non ISO 8601 versions of DAY_OF_WEEK and WEEK_OF_YEAR functions {pull}36358[#36358] (issue: {issue}36263[#36263]) +* Do not ignore all fields whose names start with underscore {pull}36214[#36214] (issue: {issue}36206[#36206]) +* Fix issue with wrong data type for scripted Grouping keys {pull}35969[#35969] (issue: {issue}35662[#35662]) +* Fix translation of math functions to painless {pull}35910[#35910] (issue: {issue}35654[#35654]) +* Fix jdbc jar to include deps {pull}35602[#35602] +* Fix query translation for scripted queries {pull}35408[#35408] (issue: {issue}35232[#35232]) +* Clear the cursor if nested inner hits are enough to fulfill the query required limits {pull}35398[#35398] (issue: {issue}35176[#35176]) +* Introduce IsNull node to simplify expressions {pull}35206[#35206] (issues: {issue}34876[#34876], {issue}35171[#35171]) +* The SSL default configuration shouldn't override the https protocol if used {pull}34635[#34635] (issue: {issue}33817[#33817]) +* Minor fix for javadoc {pull}32573[#32573] (issue: {issue}32553[#32553]) +* Prevent grouping over grouping functions {pull}38649[#38649] (issue: {issue}38308[#38308]) +* Relax StackOverflow circuit 
breaker for constants {pull}38572[#38572] (issue: {issue}38571[#38571])
+* Fix issue with IN not resolving to underlying keyword field {pull}38440[#38440] (issue: {issue}38424[#38424])
+* Change the Intervals milliseconds precision to 3 digits {pull}38297[#38297] (issue: {issue}37423[#37423])
+* Fix esType for DATETIME/DATE and INTERVALS {pull}38179[#38179] (issue: {issue}38051[#38051])
+* Added SSL configuration options tests {pull}37875[#37875] (issue: {issue}37711[#37711])
+* Fix casting from date to numeric type to use millis {pull}37869[#37869] (issue: {issue}37655[#37655])
+* Fix BasicFormatter NPE {pull}37804[#37804]
+* Return Intervals in SQL format for CLI {pull}37602[#37602] (issues: {issue}29970[#29970], {issue}36186[#36186], {issue}36432[#36432])
+* Fix object extraction from sources {pull}37502[#37502] (issue: {issue}37364[#37364])
+* Fix issue with field names containing "." {pull}37364[#37364] (issue: {issue}37128[#37128])
+* Fix bug regarding alias fields with dots {pull}37279[#37279] (issue: {issue}37224[#37224])
+* Proper handling of COUNT(field_name) and COUNT(DISTINCT field_name) {pull}37254[#37254] (issue: {issue}30285[#30285])
+* Fix COUNT DISTINCT filtering {pull}37176[#37176] (issue: {issue}37086[#37086])
+* Fix issue with wrong NULL optimization {pull}37124[#37124] (issue: {issue}35872[#35872])
+* Fix issue with complex expression as args of PERCENTILE/_RANK {pull}37102[#37102] (issue: {issue}37099[#37099])
+* Handle the bwc Joda ZonedDateTime scripting class in Painless {pull}37024[#37024] (issue: {issue}37023[#37023])
+* Fix bug regarding histograms usage in scripting {pull}36866[#36866]
+* Fix issue with always false filter involving functions {pull}36830[#36830] (issue: {issue}35980[#35980])
+* Protocol returns ISO 8601 String formatted dates instead of Long for JDBC/ODBC requests {pull}36800[#36800] (issue: {issue}36756[#36756])
+* Enhance Verifier to prevent aggregate or grouping functions from {pull}36799[#36799] (issue: {issue}36798[#36798])
+* Add missing handling of IP field in JDBC {pull}40384[#40384] (issue: {issue}40358[#40358])
+* Fix metric aggs on date/time to not return double {pull}40377[#40377] (issues: {issue}39492[#39492], {issue}40376[#40376])
+* CAST supports both SQL and ES types {pull}40365[#40365] (issue: {issue}40282[#40282])
+* Fix RLIKE bug and improve testing for RLIKE statement {pull}40354[#40354] (issues: {issue}34609[#34609], {issue}39931[#39931])
+* Unwrap the first value in an array in case of array leniency {pull}40318[#40318] (issue: {issue}40296[#40296])
+* Preserve original source for cast/convert function {pull}40271[#40271] (issue: {issue}40239[#40239])
+* Fix LIKE function equality by considering its pattern as well {pull}40260[#40260] (issue: {issue}39931[#39931])
+* Fix issue with optimization on queries with ORDER BY/LIMIT {pull}40256[#40256] (issue: {issue}40211[#40211])
+* Rewrite ROUND and TRUNCATE functions with a different optional parameter handling method {pull}40242[#40242] (issue: {issue}40001[#40001])
+* Fix issue with getting DATE type in JDBC {pull}40207[#40207]
+* Fix issue with date columns
returned always in UTC {pull}40163[#40163] (issue: {issue}40152[#40152])
+* Add multi_value_field_leniency inside FieldHitExtractor {pull}40113[#40113] (issue: {issue}39700[#39700])
+* Fix incorrect ordering of groupings (GROUP BY) based on orderings (ORDER BY) {pull}40087[#40087] (issue: {issue}39956[#39956])
+* Fix bug with JDBC timezone setting and DATE type {pull}39978[#39978] (issue: {issue}39915[#39915])
+* Use underlying exact field for LIKE/RLIKE {pull}39443[#39443] (issue: {issue}39442[#39442])
+* Fix display size for DATE/DATETIME {pull}40669[#40669]
+* Have LIKE/RLIKE use wildcard and regexp queries {pull}40628[#40628] (issue: {issue}40557[#40557])
+* Fix getTime() methods in JDBC {pull}40484[#40484]
+* SYS TABLES: enumerate tables of requested types {pull}40535[#40535] (issue: {issue}40348[#40348])
+* Passing an input to the CLI "freezes" the CLI after displaying an error message {pull}40164[#40164] (issue: {issue}40557[#40557])
+* Wrap ZonedDateTime parameters inside scripts {pull}39911[#39911] (issue: {issue}39877[#39877])
+* ConstantProcessor can now handle NamedWriteable {pull}39876[#39876] (issue: {issue}39875[#39875])
+* Extend the multi dot field notation extraction to lists of values {pull}39823[#39823] (issue: {issue}39738[#39738])
+* Values in datetime script aggs should be treated as long {pull}39773[#39773] (issue: {issue}37042[#37042])
+* Don't allow inexact fields for MIN/MAX {pull}39563[#39563] (issue: {issue}39427[#39427])
+* Fix merging of incompatible multi-fields {pull}39560[#39560] (issue: {issue}39547[#39547])
+* Fix COUNT DISTINCT column name {pull}39537[#39537] (issue: {issue}39511[#39511])
+* Enable accurate hit tracking on demand {pull}39527[#39527] (issue: {issue}37971[#37971])
+* Ignore UNSUPPORTED fields for JDBC and ODBC modes in 'SYS COLUMNS' {pull}39518[#39518] (issue: {issue}39471[#39471])
+* Enforce JDBC driver - ES server version parity {pull}38972[#38972] (issue: {issue}38775[#38775])
+* Fall back to using the field name for column label {pull}38842[#38842] (issue: {issue}38831[#38831])
+
+Suggesters::
+* Fix duplicate removal when merging completion suggestions {pull}36996[#36996] (issue: {issue}35836[#35836])
+
+Task Management::
+* Un-assign persistent tasks as nodes exit the cluster {pull}37656[#37656]
+
+Watcher::
+* Only trigger a watch if new or schedule/changed {pull}35908[#35908]
+* Fix Watcher NotificationService's secure settings {pull}35610[#35610] (issue: {issue}35378[#35378])
+* Fix integration tests to ensure correct start/stop of Watcher {pull}35271[#35271] (issues: {issue}29877[#29877], {issue}30705[#30705], {issue}33291[#33291], {issue}34448[#34448], {issue}34462[#34462])
+
+ZenDiscovery::
+* Remove duplicate discovered peers {pull}35505[#35505]
+* Respect the no_master_block setting {pull}36478[#36478]
+* Cancel GetDiscoveredNodesAction when bootstrapped {pull}36423[#36423] (issues: {issue}36380[#36380], {issue}36381[#36381])
+* Only elect master-eligible nodes {pull}35996[#35996]
+* Fix size of rolling-upgrade bootstrap config {pull}38031[#38031]
+* Always return metadata version if metadata is requested {pull}37674[#37674]
+* Elect freshest master in upgrade {pull}37122[#37122] (issue: {issue}40[#40])
+* Fix cluster state persistence for single-node discovery {pull}36825[#36825]
+
+[[regression-7.0.0]]
+[float]
+=== Regressions
+
+Infra/Core::
+* Restore date aggregation performance in UTC case
{pull}38221[#38221] (issue: {issue}37826[#37826]) +* Speed up converting of temporal accessor to zoned date time {pull}37915[#37915] (issue: {issue}37826[#37826]) + +Mapping:: +* Performance fix. Reduce deprecation calls for the same bulk request {pull}37415[#37415] (issue: {issue}37411[#37411]) + +Scripting:: +* Use Number as a return value for BucketAggregationScript {pull}35653[#35653] (issue: {issue}35351[#35351]) + +[[upgrade-7.0.0]] +[float] +=== Upgrades + +Discovery-Plugins:: +* Bump jackson-databind version for AWS SDK {pull}39183[#39183] + +Engine:: +* Upgrade to lucene-8.0.0-snapshot-83f9835. {pull}37668[#37668] +* Upgrade to Lucene 8.0.0-snapshot-ff9509a8df {pull}39350[#39350] +* Upgrade to Lucene 8.0.0 {pull}39992[#39992] (issue: {issue}39640[#39640]) + +Geo:: +* Upgrade JTS to 1.14.0 {pull}29141[#29141] (issue: {issue}29122[#29122]) + +Ingest:: +* Update geolite2 database in ingest geoip plugin {pull}33840[#33840] +* Bump jackson-databind version for ingest-geoip {pull}39182[#39182] + +Infra/Core:: +* Upgrade to a Lucene 8 snapshot {pull}33310[#33310] (issues: {issue}32899[#32899], {issue}33028[#33028], {issue}33309[#33309]) + +Security:: +* Upgrade the bouncycastle dependency to 1.61 {pull}40017[#40017] (issue: {issue}40011[#40011]) + +Search:: +* Upgrade to Lucene 8.0.0 GA {pull}39992[#39992] (issue: {issue}39640[#39640]) + +Snapshot/Restore:: +* plugins/repository-gcs: Update google-cloud-storage/core to 1.59.0 {pull}39748[#39748] (issue: {issue}39366[#39366]) + +Network:: +* Fix Netty Leaks by upgrading to 4.1.28 {pull}32511[#32511] (issue: {issue}32487[#32487]) +* Upgrade Netty 4.3.32.Final {pull}36102[#36102] (issue: {issue}35360[#35360]) + +Machine Learning:: +* No need to add state doc mapping on job open in 7.x {pull}37759[#37759] From 0a85d1fe03ead83e39208d1e27ab1a7ba6a550c0 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 10 Apr 2019 09:41:04 -0400 Subject: [PATCH 35/60] Add missing "long form" 7.0 breaking changes (#41049) These are breaking changes that were present in the release notes but the PR didn't include any "narrative long form" description for the docs. --- docs/reference/migration/migrate_7_0.asciidoc | 4 ++ .../migrate_7_0/aggregations.asciidoc | 8 +++ .../migration/migrate_7_0/api.asciidoc | 62 ++++++++++++++++++- .../migration/migrate_7_0/ingest.asciidoc | 27 ++++++++ .../migrate_7_0/low_level_restclient.asciidoc | 5 ++ .../migration/migrate_7_0/mappings.asciidoc | 9 ++- .../migration/migrate_7_0/ml.asciidoc | 15 +++++ .../migration/migrate_7_0/packaging.asciidoc | 6 ++ .../migration/migrate_7_0/search.asciidoc | 22 +++++++ .../migration/migrate_7_0/settings.asciidoc | 19 ++++++ .../migration/migrate_7_0/suggesters.asciidoc | 5 ++ 11 files changed, 179 insertions(+), 3 deletions(-) create mode 100644 docs/reference/migration/migrate_7_0/ingest.asciidoc create mode 100644 docs/reference/migration/migrate_7_0/ml.asciidoc diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index 3d723074e9d..f0bdd1de439 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -13,7 +13,9 @@ See also <> and <>. 
* <>
* <>
* <>
+* <>
* <>
+* <>
* <>
* <>
* <>
@@ -50,8 +52,10 @@
 include::migrate_7_0/aggregations.asciidoc[]
 include::migrate_7_0/analysis.asciidoc[]
 include::migrate_7_0/cluster.asciidoc[]
 include::migrate_7_0/discovery.asciidoc[]
+include::migrate_7_0/ingest.asciidoc[]
 include::migrate_7_0/indices.asciidoc[]
 include::migrate_7_0/mappings.asciidoc[]
+include::migrate_7_0/ml.asciidoc[]
 include::migrate_7_0/search.asciidoc[]
 include::migrate_7_0/suggesters.asciidoc[]
 include::migrate_7_0/packaging.asciidoc[]
diff --git a/docs/reference/migration/migrate_7_0/aggregations.asciidoc b/docs/reference/migration/migrate_7_0/aggregations.asciidoc
index 6fb974a1095..85dd2af04f5 100644
--- a/docs/reference/migration/migrate_7_0/aggregations.asciidoc
+++ b/docs/reference/migration/migrate_7_0/aggregations.asciidoc
@@ -47,3 +47,11 @@ explicitly defining how their data is processed.
 The `percentiles` and `percentile_ranks` aggregations used to return `NaN` in
 the response if they were applied to an empty set of values. Because `NaN` is
 not officially supported by JSON, it has been replaced with `null`.
+
+[float]
+==== `stats` and `extended_stats` now return 0 instead of `null` for zero docs
+
+When the `stats` and `extended_stats` aggregations collected zero docs (`doc_count: 0`),
+their value would be `null`. This was in contrast with the `sum` aggregation which
+would return `0`. The `stats` and `extended_stats` aggregations are now consistent with
+`sum` and also return zero.
diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc
index cdcf6a93e36..8fae000457f 100644
--- a/docs/reference/migration/migrate_7_0/api.asciidoc
+++ b/docs/reference/migration/migrate_7_0/api.asciidoc
@@ -76,10 +76,32 @@ pools. Note that `core` and `max` will be populated for scaling thread pools,
 and `size` will be populated for fixed thread pools.
 [float]
-==== The parameter `fields` deprecated in 6.x has been removed from Bulk request
-and Update request. The Update API returns `400 - Bad request` if request contains
+==== The parameter `fields` deprecated in 6.x has been removed from Bulk request
+and Update request. The Update API returns `400 - Bad request` if request contains
 unknown parameters (instead of being ignored as in the previous version).
+[float]
+==== PUT Document with Version error message changed when document is missing
+
+If you attempt to `PUT` a document with versioning (e.g. `PUT /test/_doc/1?version=4`)
+but the document does not exist, a cryptic message is returned:
+
+[source,text]
+----------
+version conflict, current version [-1] is different than the one provided [4]
+----------
+
+Now, if the document is missing, a more helpful message is returned:
+
+[source,text]
+----------
+document does not exist (expected version [4])
+----------
+
+Although exception messages are liable to change and are not generally subject to
+backwards-compatibility guarantees, the nature of this message might mean clients
+are relying on parsing the version numbers, so the format change might impact some
+users.
+
 [float]
 [[remove-suggest-metric]]
 ==== Remove support for `suggest` metric/index metric in indices stats and nodes stats APIs
@@ -168,3 +190,39 @@ privilege).
 The `_cache/clear` API no longer supports the `GET` HTTP verb. It must be called
 with `POST`.
+
+[float]
+==== Cluster state size metrics removed from Cluster State API Response
+
+The `compressed_size` / `compressed_size_in_bytes` fields were removed from
+the Cluster State API response. The calculation of the size was expensive and of
+dubious value, so these fields were removed from the response.
+
+[float]
+==== Migration Assistance API has been removed
+
+The Migration Assistance API has been functionally replaced by the
+Deprecation Info API. The Migration Upgrade API is not used for the
+transition from ES 6.x to 7.x, and does not need to be kept around to
+repair indices that were not properly upgraded before upgrading the
+cluster, as was the case when moving to 6.x.
+
+[float]
+==== Changes to thread pool naming in Node and Cat APIs
+The `thread_pool` information returned from the Nodes and Cat APIs has been
+standardized to use the same terminology as the thread pool configurations.
+This means the response will align with the configuration instead of being
+the same across all the thread pools, regardless of type.
+
+[float]
+==== Return 200 when cluster has valid read-only blocks
+If the cluster was configured with `no_master_block: write` and lost its master,
+it would return a `503` status code from a main request (`GET /`) even though
+there were viable read-only nodes available. The cluster now returns a 200 status
+code in this situation.
+
+[float]
+==== Clearing indices cache is now POST-only
+Clearing the indices cache could previously be done via both GET and POST. As GET
+should only support read-only, non-state-changing operations, this is no longer
+allowed. Only POST can be used to clear the cache.
diff --git a/docs/reference/migration/migrate_7_0/ingest.asciidoc b/docs/reference/migration/migrate_7_0/ingest.asciidoc
new file mode 100644
index 00000000000..a8c9b8084d6
--- /dev/null
+++ b/docs/reference/migration/migrate_7_0/ingest.asciidoc
@@ -0,0 +1,27 @@
+[float]
+[[breaking_70_ingest_changes]]
+=== Ingest changes
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+
+//tag::notable-breaking-changes[]
+
+[float]
+==== Ingest configuration exception information is now transmitted in metadata field
+
+Previously, some configuration exception information about ingest processors
+was sent to the client in the HTTP headers, which is inconsistent with how
+exceptions are conveyed in other parts of Elasticsearch.
+
+Configuration exception information is now conveyed as a field in the response
+body.
+//end::notable-breaking-changes[]
+[float]
+==== Ingest plugin special handling has been removed
+There was some special handling for installing and removing the `ingest-geoip` and
+`ingest-user-agent` plugins after they were converted to modules. This special handling
+was done to minimize breaking users in a minor release, and would exit with a status code
+of zero to avoid breaking automation.
+
+This special handling has now been removed.
diff --git a/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc
index e3132b59680..0c7539873cc 100644
--- a/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc
+++ b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc
@@ -29,3 +29,8 @@ backwards compatibility.
 We deprecated `setHosts` in 6.4.0 in favor of
 `setNodes` because it supports host metadata used by the `NodeSelector`.
+
+[float]
+==== Minimum compiler version change
+The minimum compiler version on the low-level REST client has been bumped
+to JDK 8.
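To make the POST-only cache clearing described above concrete, here is a minimal sketch using the low-level REST client (the same client whose JDK 8 requirement is noted above). The node address and the index name `my-index` are assumptions for illustration, not part of the change itself.

[source,java]
----------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;

public class ClearCacheExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // As of 7.0 the indices cache can only be cleared with POST...
            Response ok = client.performRequest(new Request("POST", "/my-index/_cache/clear"));
            System.out.println(ok.getStatusLine());
            try {
                // ...while a GET to the same endpoint is rejected.
                client.performRequest(new Request("GET", "/my-index/_cache/clear"));
            } catch (ResponseException e) {
                // The low-level client surfaces non-2xx responses as ResponseException.
                System.out.println("GET rejected: " + e.getResponse().getStatusLine());
            }
        }
    }
}
----------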
diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc index 422c649e33b..cbbe20bbf84 100644 --- a/docs/reference/migration/migrate_7_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc @@ -21,12 +21,14 @@ This field used to index a composite key formed of the `_type` and the `_id`. Now that indices cannot have multiple types, this has been removed in favour of `_id`. +//tag::notable-breaking-changes[] [float] ==== The `_default_` mapping is no longer allowed The `_default_` mapping has been deprecated in 6.0 and is now no longer allowed in 7.0. Trying to configure a `_default_` mapping on 7.x indices will result in an error. +//end::notable-breaking-changes[] [float] ==== `index_options` for numeric fields has been removed @@ -85,4 +87,9 @@ will be removed in a future version. The maximum allowed number of completion contexts in a mapping will be limited to 10 in the next major version. Completion fields that define more than 10 -contexts in a mapping will log a deprecation warning in this version. \ No newline at end of file +contexts in a mapping will log a deprecation warning in this version. + +[float] +==== `include_type_name` now defaults to `false` +The default for `include_type_name` is now `false` for all APIs that accept +the parameter. diff --git a/docs/reference/migration/migrate_7_0/ml.asciidoc b/docs/reference/migration/migrate_7_0/ml.asciidoc new file mode 100644 index 00000000000..89b77232821 --- /dev/null +++ b/docs/reference/migration/migrate_7_0/ml.asciidoc @@ -0,0 +1,15 @@ +[float] +[[breaking_70_ml_changes]] +=== ML changes + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[float] +==== Types in Datafeed config are no longer valid +Types have been removed from the datafeed config and are no longer +valid parameters. diff --git a/docs/reference/migration/migrate_7_0/packaging.asciidoc b/docs/reference/migration/migrate_7_0/packaging.asciidoc index db5df312356..f2b4c2a8a2f 100644 --- a/docs/reference/migration/migrate_7_0/packaging.asciidoc +++ b/docs/reference/migration/migrate_7_0/packaging.asciidoc @@ -29,3 +29,9 @@ for windows. These files have been removed. Use the `zip` package instead. Ubuntu 14.04 will reach end-of-life on April 30, 2019. As such, we are no longer supporting Ubuntu 14.04. + +[float] +==== CLI secret prompting is no longer supported +The ability to use `${prompt.secret}` and `${prompt.text}` to collect secrets +from the CLI at server start is no longer supported. Secure settings have replaced +the need for these prompts. diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index f0591408964..8b23201f42c 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -212,6 +212,7 @@ on whether queries need to access score or not. As a result `bool` queries with `minimum_should_match` to 1. This behavior has been deprecated in the previous major version. +//tag::notable-breaking-changes[] [float] ==== `hits.total` is now an object in the search response @@ -241,6 +242,7 @@ You can also retrieve `hits.total` as a number in the rest response by adding `rest_total_hits_as_int=true` in the request parameter of the search request. 
This parameter has been added to ease the transition to the new format and
will be removed in the next major version (8.0).
+//end::notable-breaking-changes[]
[float]
==== `hits.total` is omitted in the response if `track_total_hits` is disabled (false)
@@ -250,6 +252,7 @@ will set `hits.total` to null and the object will not be displayed in the rest
layer. You can add `rest_total_hits_as_int=true` in the search request
parameters to get the old format back (`"total": -1`).
+//tag::notable-breaking-changes[]
[float]
==== `track_total_hits` defaults to 10,000
@@ -280,3 +283,22 @@ documents. If the total number of hits that match the query is greater than this
You can force the count to always be accurate by setting `track_total_hits`
to true explicitly in the search request.
+//end::notable-breaking-changes[]
+
+[float]
+==== Limitations on Similarities
+Lucene 8 introduced more constraints on similarities, in particular:
+
+- scores must not be negative,
+- scores must not decrease when term freq increases,
+- scores must not increase when norm (interpreted as an unsigned long) increases.
+
+[float]
+==== Weights in Function Score must be positive
+Negative `weight` parameters in the `function_score` are no longer allowed.
+
+[float]
+==== Query string and Simple query string limit expansion of fields to 1024
+The number of automatically expanded fields for the "all fields"
+mode (`"default_field": "*"`) for the query_string and simple_query_string
+queries is now 1024 fields.
diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc
index f2488092c85..499e6ce3a0f 100644
--- a/docs/reference/migration/migrate_7_0/settings.asciidoc
+++ b/docs/reference/migration/migrate_7_0/settings.asciidoc
@@ -211,3 +211,22 @@ Elastic Stack to handle the indexing part.
 ==== Ingest User Agent processor now defaults to the `ecs` output format
 https://github.com/elastic/ecs[ECS] format is now the default. The `ecs` setting
 for the user agent ingest processor now defaults to true.
+
+[float]
+[[remove-action-master-force_local]]
+==== Remove `action.master.force_local`
+
+The `action.master.force_local` setting was an undocumented setting, used
+internally by the tribe node to force reads to local cluster state (instead of
+forwarding to a master, which tribe nodes did not have). Since the tribe
+node was removed, this setting was removed too.
+
+[float]
+==== Enforce cluster-wide shard limit
+The cluster-wide shard limit is now enforced and not optional. The limit can
+still be adjusted as desired using the cluster settings API.
+
+[float]
+==== HTTP Max content length setting is no longer parsed leniently
+Previously, `http.max_content_length` would reset to `100mb` if the setting was
+`Integer.MAX_VALUE`. This leniency has been removed.
diff --git a/docs/reference/migration/migrate_7_0/suggesters.asciidoc b/docs/reference/migration/migrate_7_0/suggesters.asciidoc
index 213958382aa..5e00b0d249d 100644
--- a/docs/reference/migration/migrate_7_0/suggesters.asciidoc
+++ b/docs/reference/migration/migrate_7_0/suggesters.asciidoc
@@ -14,3 +14,8 @@
 Plugins must now explicitly indicate the type of suggestion that they produce.
+[float]
+==== Phrase suggester now multiplies alpha
+Previously, the Laplace smoothing used by the phrase suggester added `alpha`,
+when it should instead multiply. This behavior has been changed and will
+affect suggester scores.
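The `hits.total` and `track_total_hits` changes described above are easiest to see with a concrete request. The sketch below reuses the low-level REST client and the hypothetical `my-index` from the earlier example; it asks for an exact hit count and opts back into the pre-7.0 numeric `hits.total` for the transition period.

[source,java]
----------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class TotalHitsExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request search = new Request("POST", "/my-index/_search");
            // Without track_total_hits=true, counts above 10,000 come back as
            // {"value": 10000, "relation": "gte"} rather than an exact number.
            search.setJsonEntity("{ \"track_total_hits\": true, \"query\": { \"match_all\": {} } }");
            // Opt back into the old numeric hits.total; this escape hatch is removed in 8.0.
            search.addParameter("rest_total_hits_as_int", "true");
            Response response = client.performRequest(search);
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
----------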
From d00d3f4afa18b3e0dc7029328d292f5a84e1ab32 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?=
Date: Wed, 10 Apr 2019 15:43:04 +0200
Subject: [PATCH 36/60] Mute DataFrameTransformCheckpointTests#testGetBehind

---
 .../dataframe/transforms/DataFrameTransformCheckpointTests.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java
index 4124b33ddf1..6fe896872f3 100644
--- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java
@@ -91,6 +91,7 @@ public class DataFrameTransformCheckpointTests extends AbstractSerializingDataFr
             .matches(new DataFrameTransformCheckpoint(id, timestamp, checkpoint, checkpointsByIndex, (timeUpperBound / 2) + 1)));
     }

+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/41076")
     public void testGetBehind() {
         String id = randomAlphaOfLengthBetween(1, 10);
         long timestamp = randomNonNegativeLong();
From 64a05e522f1911aa8cb38a2f6c4fb018ff0850fa Mon Sep 17 00:00:00 2001
From: Chris Earle
Date: Wed, 10 Apr 2019 10:09:28 -0400
Subject: [PATCH 37/60] Properly handle Monitoring exporters all disabled
 (#40920) (#41043)

When monitoring exporters are all disabled, which must be done
explicitly, _and_ monitoring collection is enabled, any call to
`_xpack/monitoring/_bulk` will create a task that never closes, _and_ ES
collection will stop happening, because a semaphore is never marked as
completed.

This also simplifies the async `ExportBulk` code by removing the third
step (the second async step, `close`) entirely, because it was unnecessary
in both implementations.
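As a rough sketch of the hang described above (all names here -- `Exporter`, `openBulkAsync`, `onAllDone` -- are illustrative, not the real API): completion is only ever signalled from a per-exporter callback, so with zero enabled exporters nothing counts down and the caller waits forever. The change below short-circuits by responding immediately when the exporter map is empty.

[source,java]
----------
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

final class ExportHangSketch {
    interface Exporter {
        void openBulkAsync(Runnable onBulkReady); // hypothetical async hook
    }

    static void export(List<Exporter> enabledExporters, Runnable onAllDone) {
        AtomicInteger remaining = new AtomicInteger(enabledExporters.size());
        for (Exporter exporter : enabledExporters) {
            exporter.openBulkAsync(() -> {
                // Completion is only signalled from inside this callback...
                if (remaining.decrementAndGet() == 0) {
                    onAllDone.run();
                }
            });
        }
        // ...so when enabledExporters is empty the loop body never runs,
        // onAllDone is never invoked, and a caller blocking on it (the
        // monitoring collection semaphore) hangs indefinitely.
    }
}
----------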
--- .../xpack/monitoring/exporter/ExportBulk.java | 111 ++---------------- .../xpack/monitoring/exporter/Exporters.java | 10 +- .../exporter/http/HttpExportBulk.java | 5 - .../monitoring/exporter/local/LocalBulk.java | 20 +--- .../monitoring/exporter/ExportersTests.java | 76 +++++++++--- .../exporter/http/HttpExporterIT.java | 7 +- 6 files changed, 87 insertions(+), 142 deletions(-) diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ExportBulk.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ExportBulk.java index 0501c625727..938f0f57c26 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ExportBulk.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ExportBulk.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.monitoring.exporter; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -14,7 +15,6 @@ import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import java.util.Collection; import java.util.List; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; /** @@ -25,7 +25,10 @@ public abstract class ExportBulk { protected final String name; protected final ThreadContext threadContext; - private final AtomicReference state = new AtomicReference<>(State.INITIALIZING); + /** + * {@code closed} being {@code false} means that it can still be added onto. + */ + private final AtomicBoolean closed = new AtomicBoolean(); public ExportBulk(String name, ThreadContext threadContext) { this.name = Objects.requireNonNull(name); @@ -45,7 +48,7 @@ public abstract class ExportBulk { * Add documents to the exporting bulk */ public void add(Collection docs) throws ExportException { - if (state.get() == State.INITIALIZING) { + if (closed.get() == false) { doAdd(docs); } } @@ -56,7 +59,7 @@ public abstract class ExportBulk { * Flush the exporting bulk */ public void flush(ActionListener listener) { - if (state.compareAndSet(State.INITIALIZING, State.FLUSHING)) { + if (closed.compareAndSet(false, true)) { doFlush(listener); } else { listener.onResponse(null); @@ -65,56 +68,6 @@ public abstract class ExportBulk { protected abstract void doFlush(ActionListener listener); - /** - * Close the exporting bulk - */ - public void close(boolean flush, ActionListener listener) { - if (state.getAndSet(State.CLOSED) != State.CLOSED) { - if (flush) { - flushAndClose(listener); - } else { - doClose(listener); - } - } else { - listener.onResponse(null); - } - } - - private void flushAndClose(ActionListener listener) { - doFlush(new ActionListener() { - @Override - public void onResponse(Void aVoid) { - doClose(listener); - } - - @Override - public void onFailure(Exception e) { - // we need to close in spite of the failure, but we will return the failure - doClose(new ActionListener() { - - private final ExportException exportException = new ExportException("Exception when closing export bulk", e); - - @Override - public void onResponse(Void aVoid) { - listener.onFailure(exportException); - } - - @Override - public void onFailure(Exception e) { - exportException.addSuppressed(e); - listener.onFailure(exportException); - } - }); - } - }); - } - - protected abstract void doClose(ActionListener listener); - - protected boolean 
isClosed() { - return state.get() == State.CLOSED; - } - /** * This class holds multiple export bulks exposed as a single compound bulk. */ @@ -170,54 +123,16 @@ public abstract class ExportBulk { iteratingActionListener.run(); } - @Override - protected void doClose(ActionListener listener) { - final SetOnce exceptionRef = new SetOnce<>(); - final BiConsumer> bulkBiConsumer = (exportBulk, iteratingListener) -> { - // for every export bulk we close and pass back the response, which should always be - // null. When we have an exception, we wrap the first and then add suppressed exceptions - exportBulk.doClose(ActionListener.wrap(iteratingListener::onResponse, e -> { - if (exceptionRef.get() == null) { - exceptionRef.set(new ExportException("failed to close export bulks", e)); - } else if (e instanceof ExportException) { - exceptionRef.get().addExportException((ExportException) e); - } else { - exceptionRef.get().addSuppressed(e); - } - // this is tricky to understand but basically we suppress the exception for use - // later on and call the passed in listener so that iteration continues - iteratingListener.onResponse(null); - })); - }; - IteratingActionListener iteratingActionListener = - new IteratingActionListener<>(newExceptionHandlingListener(exceptionRef, listener), bulkBiConsumer, bulks, - threadContext); - iteratingActionListener.run(); - } - private static ActionListener newExceptionHandlingListener(SetOnce exceptionRef, ActionListener listener) { - return new ActionListener() { - @Override - public void onResponse(Void aVoid) { - if (exceptionRef.get() == null) { - listener.onResponse(null); - } else { - listener.onFailure(exceptionRef.get()); - } + return ActionListener.wrap(r -> { + if (exceptionRef.get() == null) { + listener.onResponse(null); + } else { + listener.onFailure(exceptionRef.get()); } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }; + }, listener::onFailure); } } - private enum State { - INITIALIZING, - FLUSHING, - CLOSED - } } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java index 484361ddc54..1b8f5dab9e3 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java @@ -179,6 +179,14 @@ public class Exporters extends AbstractLifecycleComponent { } final Map exporterMap = exporters.get(); + + // if no exporters are defined (which is only possible if all are defined explicitly disabled), + // then ignore the request immediately + if (exporterMap.isEmpty()) { + listener.onResponse(null); + return; + } + final AtomicArray accumulatedBulks = new AtomicArray<>(exporterMap.size()); final CountDown countDown = new CountDown(exporterMap.size()); @@ -225,7 +233,7 @@ public class Exporters extends AbstractLifecycleComponent { } catch (ExportException e) { exceptionRef.set(e); } finally { - bulk.close(lifecycleState() == Lifecycle.State.STARTED, ActionListener.wrap(r -> { + bulk.flush(ActionListener.wrap(r -> { if (exceptionRef.get() == null) { listener.onResponse(null); } else { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java index 
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java index 484361ddc54..1b8f5dab9e3 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java @@ -179,6 +179,14 @@ public class Exporters extends AbstractLifecycleComponent { } final Map exporterMap = exporters.get(); + + // if no exporters are defined (which is only possible if all of them are explicitly disabled), + // then ignore the request immediately + if (exporterMap.isEmpty()) { + listener.onResponse(null); + return; + } + final AtomicArray accumulatedBulks = new AtomicArray<>(exporterMap.size()); final CountDown countDown = new CountDown(exporterMap.size()); @@ -225,7 +233,7 @@ public class Exporters extends AbstractLifecycleComponent { } catch (ExportException e) { exceptionRef.set(e); } finally { - bulk.close(lifecycleState() == Lifecycle.State.STARTED, ActionListener.wrap(r -> { + bulk.flush(ActionListener.wrap(r -> { if (exceptionRef.get() == null) { listener.onResponse(null); } else { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java index cd307322cb5..3476495cc92 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java @@ -123,11 +123,6 @@ class HttpExportBulk extends ExportBulk { } } - @Override - protected void doClose(ActionListener listener) { - listener.onResponse(null); - } - private byte[] toBulkBytes(final MonitoringDoc doc) throws IOException { final XContentType xContentType = XContentType.JSON; final XContent xContent = xContentType.xContent(); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalBulk.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalBulk.java index 3eb92fd68a3..4b80f614b48 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalBulk.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalBulk.java @@ -30,8 +30,8 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; /** * LocalBulk exports monitoring data in the local cluster using bulk requests. Its usage is not thread safe since the - * {@link LocalBulk#add(Collection)}, {@link LocalBulk#flush(org.elasticsearch.action.ActionListener)} and - * {@link LocalBulk#doClose(ActionListener)} methods are not synchronized. + * {@link LocalBulk#add(Collection)} and {@link LocalBulk#flush(org.elasticsearch.action.ActionListener)} + * methods are not synchronized. */ public class LocalBulk extends ExportBulk { @@ -52,13 +52,10 @@ public class LocalBulk extends ExportBulk { } @Override - public void doAdd(Collection docs) throws ExportException { + protected void doAdd(Collection docs) throws ExportException { ExportException exception = null; for (MonitoringDoc doc : docs) { - if (isClosed()) { - return; - } if (requestBuilder == null) { requestBuilder = client.prepareBulk(); } @@ -99,8 +96,8 @@ public class LocalBulk extends ExportBulk { } @Override - public void doFlush(ActionListener listener) { - if (requestBuilder == null || requestBuilder.numberOfActions() == 0 || isClosed()) { + protected void doFlush(ActionListener listener) { + if (requestBuilder == null || requestBuilder.numberOfActions() == 0) { listener.onResponse(null); } else { try { @@ -138,11 +135,4 @@ public class LocalBulk extends ExportBulk { } } - @Override - protected void doClose(ActionListener listener) { - if (isClosed() == false) { - requestBuilder = null; - } - listener.onResponse(null); - } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ExportersTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ExportersTests.java index fde975bfab1..74b38afe451 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ExportersTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ExportersTests.java @@ -52,6 +52,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -259,6 +260,25 @@ public class
ExportersTests extends ESTestCase { verify(state).blocks(); } + /** + * Verifies that, when no exporters are enabled, the {@code Exporters} will still return as expected. + */ + public void testNoExporters() throws Exception { + Settings.Builder settings = + Settings.builder() + .put("xpack.monitoring.exporters.explicitly_disabled.type", "local") + .put("xpack.monitoring.exporters.explicitly_disabled.enabled", false); + + Exporters exporters = new Exporters(settings.build(), factories, clusterService, licenseState, threadContext); + exporters.start(); + + assertThat(exporters.getEnabledExporters(), empty()); + + assertExporters(exporters); + + exporters.close(); + } + /** * This test creates N threads that export a random number of documents * using a {@link Exporters} instance. */ @@ -276,18 +296,37 @@ public class ExportersTests extends ESTestCase { Exporters exporters = new Exporters(settings.build(), factories, clusterService, licenseState, threadContext); exporters.start(); + assertThat(exporters.getEnabledExporters(), hasSize(nbExporters)); + + final int total = assertExporters(exporters); + + for (Exporter exporter : exporters.getEnabledExporters()) { + assertThat(exporter, instanceOf(CountingExporter.class)); + assertThat(((CountingExporter) exporter).getExportedCount(), equalTo(total)); + } + + exporters.close(); + } + + /** + * Attempt to export a random number of documents via {@code exporters} from multiple threads. + * + * @param exporters The configured and started exporters instance to use. + * @return The total number of documents sent to the {@code exporters}. + */ + private int assertExporters(final Exporters exporters) throws InterruptedException { final Thread[] threads = new Thread[3 + randomInt(7)]; final CyclicBarrier barrier = new CyclicBarrier(threads.length); final List exceptions = new CopyOnWriteArrayList<>(); + final AtomicInteger counter = new AtomicInteger(threads.length); int total = 0; for (int i = 0; i < threads.length; i++) { - int nbDocs = randomIntBetween(10, 50); - total += nbDocs; - + final int threadDocs = randomIntBetween(10, 50); final int threadNum = i; - final int threadDocs = nbDocs; + + total += threadDocs; threads[i] = new Thread(new AbstractRunnable() { @Override @@ -297,18 +336,25 @@ public class ExportersTests extends ESTestCase { @Override protected void doRun() throws Exception { - List docs = new ArrayList<>(); + final List docs = new ArrayList<>(); for (int n = 0; n < threadDocs; n++) { docs.add(new TestMonitoringDoc(randomAlphaOfLength(5), randomNonNegativeLong(), randomNonNegativeLong(), null, MonitoredSystem.ES, randomAlphaOfLength(5), null, String.valueOf(n))); } - barrier.await(10, TimeUnit.SECONDS); exporters.export(docs, ActionListener.wrap( - r -> logger.debug("--> thread [{}] successfully exported {} documents", threadNum, threadDocs), - e -> logger.debug("--> thread [{}] failed to export {} documents", threadNum, threadDocs))); - + r -> { + counter.decrementAndGet(); + logger.debug("--> thread [{}] successfully exported {} documents", threadNum, threadDocs); + }, + e -> { + exceptions.add(e); + logger.debug("--> thread [{}] failed to export {} documents", threadNum, threadDocs); + }) + ); + barrier.await(10, TimeUnit.SECONDS); } }, "export_thread_" + i); + threads[i].start(); } @@ -317,12 +363,9 @@ } assertThat(exceptions, empty()); - for (Exporter exporter : exporters.getEnabledExporters()) { - assertThat(exporter, instanceOf(CountingExporter.class)); - assertThat(((CountingExporter) exporter).getExportedCount(), equalTo(total)); - } + assertThat(counter.get(), is(0)); - exporters.close(); + return total; + } static class TestExporter extends Exporter { @@ -401,11 +444,6 @@ public class ExportersTests extends ESTestCase { listener.onResponse(null); } - @Override - protected void doClose(ActionListener listener) { - listener.onResponse(null); - } - int getCount() { return count.get(); }
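Stripped of the monitoring specifics, the reworked test above coordinates its worker threads with a CyclicBarrier and counts listener completions with an AtomicInteger. A standalone sketch of that pattern (invented names, and a synchronous stand-in for the asynchronous export call) looks roughly like this:

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class BarrierCountdownSketch {

    // stand-in for the asynchronous exporters.export(docs, listener) call
    static void export(Consumer<Void> onSuccess) {
        onSuccess.accept(null);
    }

    public static void main(String[] args) throws Exception {
        final int nThreads = 5;
        final AtomicInteger pending = new AtomicInteger(nThreads);
        final List<Exception> exceptions = new CopyOnWriteArrayList<>();
        final CyclicBarrier barrier = new CyclicBarrier(nThreads);
        final Thread[] threads = new Thread[nThreads];
        for (int i = 0; i < nThreads; i++) {
            threads[i] = new Thread(() -> {
                try {
                    // the success callback is the only place the counter moves
                    export(r -> pending.decrementAndGet());
                    // wait until every thread has issued its export, as the test does
                    barrier.await(10, TimeUnit.SECONDS);
                } catch (Exception e) {
                    exceptions.add(e);
                }
            });
            threads[i].start();
        }
        for (Thread thread : threads) {
            thread.join();
        }
        if (exceptions.isEmpty() == false || pending.get() != 0) {
            throw new AssertionError("some export listeners did not complete");
        }
        System.out.println("all " + nThreads + " exports completed");
    }
}

Counting down from the success callbacks, rather than re-reading exporter state afterwards, is what lets assertExporters return a total and leave the per-exporter assertions to its callers.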
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java index 21a00a435fa..d4ea017ca8b 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java @@ -608,7 +608,7 @@ public class HttpExporterIT extends MonitoringIntegTestCase { assertBusy(() -> assertThat(clusterService().state().version(), not(ClusterState.UNKNOWN_VERSION))); try (HttpExporter exporter = createHttpExporter(settings)) { - final CountDownLatch awaitResponseAndClose = new CountDownLatch(2); + final CountDownLatch awaitResponseAndClose = new CountDownLatch(1); exporter.openBulk(ActionListener.wrap(exportBulk -> { final HttpExportBulk bulk = (HttpExportBulk)exportBulk; @@ -620,9 +620,8 @@ public class HttpExporterIT extends MonitoringIntegTestCase { e -> fail(e.getMessage()) ); - bulk.doAdd(docs); - bulk.doFlush(listener); - bulk.doClose(listener); // reusing the same listener, which is why we expect countDown x2 + bulk.add(docs); + bulk.flush(listener); }, e -> fail("Failed to create HttpExportBulk"))); // block until the bulk responds From 1eff8976a8565b1781bd4c06cef0e3a2e09e1b09 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 10 Apr 2019 14:06:06 +0200 Subject: [PATCH 38/60] Deprecate AbstractHlrc* and AbstractHlrcStreamable* base test classes (#41014) * moved hlrc parsing tests from xpack to hlrc module and removed dependency on hlrc from xpack core * deprecated old base test class * added deprecated javadoc tag * split test between xpack-core part and hlrc part * added lang-mustache test dependency, which previously came in via the hlrc dependency. * added hlrc dependency on a qa module * duplicated ClusterPrivilegeName class in xpack-core, since x-pack core no longer has a dependency on hlrc. * replaced ClusterPrivilegeName usages with string literals * moved tests to dedicated hlrc packages in order to remove the Hlrc part from the name and make sure to use imports instead of fully qualified class names where possible * removed the ESTestCase. prefix
from method invocation and use method directly, because these tests indirectly extend from ESTestCase --- ...bstractHlrcStreamableXContentTestCase.java | 26 +- .../client}/AbstractHlrcXContentTestCase.java | 26 +- .../client}/XPackInfoResponseTests.java | 23 +- .../DataFrameIndexerTransformStatsTests.java | 28 ++- ...ataFrameTransformCheckpointStatsTests.java | 28 ++- ...aFrameTransformCheckpointingInfoTests.java | 34 ++- .../DataFrameTransformStateAndStatsTests.java | 34 ++- .../hlrc/DataFrameTransformStateTests.java | 125 ++++++++++ .../graph/hlrc/GraphExploreResponseTests.java | 222 ++++++++++++++++++ .../license/GetBasicStatusResponseTests.java | 25 +- .../license/GetTrialStatusResponseTests.java | 24 +- .../client/license/LicenseStatusTests.java | 38 +++ .../license/PutLicenseResponseTests.java | 140 +++++++++++ .../license/StartBasicResponseTests.java | 23 +- .../client/ml}/MlInfoActionResponseTests.java | 24 +- .../ml/PutCalendarActionResponseTests.java | 82 +++++++ .../hlrc}/HasPrivilegesResponseTests.java | 97 +++++--- .../watcher/GetWatchResponseTests.java | 30 ++- .../hlrc/DeleteWatchResponseTests.java | 58 +++++ .../hlrc}/ExecuteWatchResponseTests.java | 23 +- .../watcher/hlrc/PutWatchResponseTests.java | 60 +++++ x-pack/plugin/core/build.gradle | 2 +- .../graph/GraphExploreResponseTests.java | 197 ---------------- .../xpack/license/LicenseStatusTests.java | 15 -- .../license/PutLicenseResponseTests.java | 118 ---------- .../watcher/DeleteWatchResponseTests.java | 43 ---- .../xpack/watcher/PutWatchResponseTests.java | 46 ---- .../DataFrameTransformStateHlrcTests.java | 58 ----- .../PutCalendarActionResponseTests.java | 50 ---- .../authz/permission/LimitedRoleTests.java | 17 +- .../authz/store/CompositeRolesStoreTests.java | 5 +- x-pack/qa/rolling-upgrade/build.gradle | 1 + 32 files changed, 1058 insertions(+), 664 deletions(-) rename {x-pack/plugin/core/src/test/java/org/elasticsearch/protocol => client/rest-high-level/src/test/java/org/elasticsearch/client}/AbstractHlrcStreamableXContentTestCase.java (67%) rename {x-pack/plugin/core/src/test/java/org/elasticsearch/protocol => client/rest-high-level/src/test/java/org/elasticsearch/client}/AbstractHlrcXContentTestCase.java (56%) rename {x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack => client/rest-high-level/src/test/java/org/elasticsearch/client}/XPackInfoResponseTests.java (87%) rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsHlrcTests.java => client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java (61%) rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsHlrcTests.java => client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java (58%) rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoHlrcTests.java => client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoTests.java (54%) rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsHlrcTests.java => client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java (56%) create mode 100644 
client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/graph/hlrc/GraphExploreResponseTests.java rename {x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack => client/rest-high-level/src/test/java/org/elasticsearch/client}/license/GetBasicStatusResponseTests.java (53%) rename {x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack => client/rest-high-level/src/test/java/org/elasticsearch/client}/license/GetTrialStatusResponseTests.java (53%) create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/license/LicenseStatusTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/license/PutLicenseResponseTests.java rename {x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack => client/rest-high-level/src/test/java/org/elasticsearch/client}/license/StartBasicResponseTests.java (76%) rename {x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action => client/rest-high-level/src/test/java/org/elasticsearch/client/ml}/MlInfoActionResponseTests.java (57%) create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java rename {x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user => client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc}/HasPrivilegesResponseTests.java (54%) rename {x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack => client/rest-high-level/src/test/java/org/elasticsearch/client}/watcher/GetWatchResponseTests.java (90%) create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java rename {x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher => client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc}/ExecuteWatchResponseTests.java (71%) create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateHlrcTests.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcStreamableXContentTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractHlrcStreamableXContentTestCase.java similarity index 67% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcStreamableXContentTestCase.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractHlrcStreamableXContentTestCase.java index d471a1a03f8..1bf78c26a37 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcStreamableXContentTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractHlrcStreamableXContentTestCase.java @@ -1,9 +1,22 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ -package org.elasticsearch.protocol; +package org.elasticsearch.client; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContent; @@ -14,6 +27,11 @@ import java.io.IOException; import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; +/** + * @deprecated Use {@link AbstractResponseTestCase} instead of this class. + */ +// TODO: Remove and change subclasses to use AbstractResponseTestCase instead +@Deprecated public abstract class AbstractHlrcStreamableXContentTestCase extends AbstractStreamableXContentTestCase { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcXContentTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractHlrcXContentTestCase.java similarity index 56% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcXContentTestCase.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractHlrcXContentTestCase.java index dfe81ab79ce..62e03e6934a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcXContentTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractHlrcXContentTestCase.java @@ -1,9 +1,22 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. */ -package org.elasticsearch.protocol; +package org.elasticsearch.client; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; @@ -11,6 +24,11 @@ import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; +/** + * @deprecated Use {@link AbstractResponseTestCase} instead of this class. + */ +// TODO: Remove and change subclasses to use AbstractResponseTestCase instead +@Deprecated public abstract class AbstractHlrcXContentTestCase extends AbstractXContentTestCase { /** diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java similarity index 87% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java index 8ddc98544a8..052e700a59f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java @@ -1,13 +1,26 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ -package org.elasticsearch.protocol.xpack; +package org.elasticsearch.client; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo; import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo; import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsHlrcTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java similarity index 61% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsHlrcTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java index 9dd4bdbdbbd..cb4b9e42838 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsHlrcTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java @@ -1,19 +1,31 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms.hlrc; +package org.elasticsearch.client.dataframe.transforms.hlrc; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; +import org.elasticsearch.client.AbstractHlrcXContentTestCase; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStatsTests; import java.io.IOException; -public class DataFrameIndexerTransformStatsHlrcTests extends AbstractHlrcXContentTestCase< +public class DataFrameIndexerTransformStatsTests extends AbstractHlrcXContentTestCase< DataFrameIndexerTransformStats, org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats> { @@ -38,7 +50,7 @@ public class DataFrameIndexerTransformStatsHlrcTests extends AbstractHlrcXConten @Override protected DataFrameIndexerTransformStats createTestInstance() { - return DataFrameIndexerTransformStatsTests.randomStats(DataFrameIndexerTransformStats.DEFAULT_TRANSFORM_ID); + return DataFrameTransformStateTests.randomStats(DataFrameIndexerTransformStats.DEFAULT_TRANSFORM_ID); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsHlrcTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java similarity index 58% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsHlrcTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java index d29cfa2784f..9762aa4ab02 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsHlrcTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java @@ -1,19 +1,31 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms.hlrc; +package org.elasticsearch.client.dataframe.transforms.hlrc; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; +import org.elasticsearch.client.AbstractHlrcXContentTestCase; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStatsTests; import java.io.IOException; -public class DataFrameTransformCheckpointStatsHlrcTests extends AbstractHlrcXContentTestCase< +public class DataFrameTransformCheckpointStatsTests extends AbstractHlrcXContentTestCase< DataFrameTransformCheckpointStats, org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats> { @@ -36,7 +48,7 @@ public class DataFrameTransformCheckpointStatsHlrcTests extends AbstractHlrcXCon @Override protected DataFrameTransformCheckpointStats createTestInstance() { - return DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(); + return DataFrameTransformStateTests.randomDataFrameTransformCheckpointStats(); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoHlrcTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoTests.java similarity index 54% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoHlrcTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoTests.java index 10f0bf5f2b7..a461d277c19 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoHlrcTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoTests.java @@ -1,27 +1,39 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms.hlrc; +package org.elasticsearch.client.dataframe.transforms.hlrc; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; +import org.elasticsearch.client.AbstractHlrcXContentTestCase; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfoTests; import java.io.IOException; -public class DataFrameTransformCheckpointingInfoHlrcTests extends AbstractHlrcXContentTestCase< +public class DataFrameTransformCheckpointingInfoTests extends AbstractHlrcXContentTestCase< DataFrameTransformCheckpointingInfo, org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo> { public static DataFrameTransformCheckpointingInfo fromHlrc( org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo instance) { return new DataFrameTransformCheckpointingInfo( - DataFrameTransformCheckpointStatsHlrcTests.fromHlrc(instance.getCurrent()), - DataFrameTransformCheckpointStatsHlrcTests.fromHlrc(instance.getInProgress()), + DataFrameTransformCheckpointStatsTests.fromHlrc(instance.getCurrent()), + DataFrameTransformCheckpointStatsTests.fromHlrc(instance.getInProgress()), instance.getOperationsBehind()); } @@ -39,7 +51,7 @@ public class DataFrameTransformCheckpointingInfoHlrcTests extends AbstractHlrcX @Override protected DataFrameTransformCheckpointingInfo createTestInstance() { - return DataFrameTransformCheckpointingInfoTests.randomDataFrameTransformCheckpointingInfo(); + return DataFrameTransformStateTests.randomDataFrameTransformCheckpointingInfo(); } @Override @@ -52,4 +64,4 @@ public class DataFrameTransformCheckpointingInfoHlrcTests extends AbstractHlrcX return true; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsHlrcTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java similarity index 56% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsHlrcTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java index 6ef473cfb95..ad08881fb56 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsHlrcTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java @@ -1,21 +1,33 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ -package org.elasticsearch.xpack.core.dataframe.transforms.hlrc; +package org.elasticsearch.client.dataframe.transforms.hlrc; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; +import org.elasticsearch.client.AbstractHlrcXContentTestCase; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStatsTests; import java.io.IOException; import java.util.function.Predicate; -public class DataFrameTransformStateAndStatsHlrcTests extends AbstractHlrcXContentTestCase { @Override @@ -28,15 +40,15 @@ public class DataFrameTransformStateAndStatsHlrcTests extends AbstractHlrcXConte public DataFrameTransformStateAndStats convertHlrcToInternal( org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats instance) { return new DataFrameTransformStateAndStats(instance.getId(), - DataFrameTransformStateHlrcTests.fromHlrc(instance.getTransformState()), - DataFrameIndexerTransformStatsHlrcTests.fromHlrc(instance.getTransformStats()), - DataFrameTransformCheckpointingInfoHlrcTests.fromHlrc(instance.getCheckpointingInfo())); + DataFrameTransformStateTests.fromHlrc(instance.getTransformState()), + DataFrameIndexerTransformStatsTests.fromHlrc(instance.getTransformStats()), + DataFrameTransformCheckpointingInfoTests.fromHlrc(instance.getCheckpointingInfo())); } @Override protected DataFrameTransformStateAndStats createTestInstance() { // the transform id is not part of HLRC as it's only to a field for internal storage, therefore use a default id - return DataFrameTransformStateAndStatsTests + return DataFrameTransformStateTests .randomDataFrameTransformStateAndStats(DataFrameIndexerTransformStats.DEFAULT_TRANSFORM_ID); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java new file mode 100644 index 00000000000..457c68d593e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms.hlrc; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.AbstractHlrcXContentTestCase; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; +import org.elasticsearch.xpack.core.indexing.IndexerState; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; + +public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase { + + public static DataFrameTransformState fromHlrc(org.elasticsearch.client.dataframe.transforms.DataFrameTransformState instance) { + return new DataFrameTransformState(DataFrameTransformTaskState.fromString(instance.getTaskState().value()), + IndexerState.fromString(instance.getIndexerState().value()), instance.getPosition(), instance.getCheckpoint(), + instance.getReason()); + } + + @Override + public org.elasticsearch.client.dataframe.transforms.DataFrameTransformState doHlrcParseInstance(XContentParser parser) + throws IOException { + return org.elasticsearch.client.dataframe.transforms.DataFrameTransformState.fromXContent(parser); + } + + @Override + public DataFrameTransformState convertHlrcToInternal(org.elasticsearch.client.dataframe.transforms.DataFrameTransformState instance) { + return fromHlrc(instance); + } + + @Override + protected DataFrameTransformState createTestInstance() { + return randomDataFrameTransformState(); + } + + @Override + protected DataFrameTransformState doParseInstance(XContentParser parser) throws IOException { + return DataFrameTransformState.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> field.equals("current_position"); + } + + public static DataFrameTransformStateAndStats randomDataFrameTransformStateAndStats(String id) { + return new DataFrameTransformStateAndStats(id, + randomDataFrameTransformState(), + randomStats(id), + randomDataFrameTransformCheckpointingInfo()); + } + + public static DataFrameTransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() { + return new DataFrameTransformCheckpointingInfo(randomDataFrameTransformCheckpointStats(), + randomDataFrameTransformCheckpointStats(), randomNonNegativeLong()); + } + + public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() { + return new DataFrameTransformCheckpointStats(randomNonNegativeLong(), randomNonNegativeLong()); + } + + public static DataFrameIndexerTransformStats randomStats(String transformId) { + return new DataFrameIndexerTransformStats(transformId, randomLongBetween(10L, 10000L), + randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), + randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), 
randomLongBetween(0L, 10000L), + randomLongBetween(0L, 10000L)); + } + + public static DataFrameTransformState randomDataFrameTransformState() { + return new DataFrameTransformState(randomFrom(DataFrameTransformTaskState.values()), + randomFrom(IndexerState.values()), + randomPosition(), + randomLongBetween(0,10), + randomBoolean() ? null : randomAlphaOfLength(10)); + } + + private static Map randomPosition() { + if (randomBoolean()) { + return null; + } + int numFields = randomIntBetween(1, 5); + Map position = new HashMap<>(); + for (int i = 0; i < numFields; i++) { + Object value; + if (randomBoolean()) { + value = randomLong(); + } else { + value = randomAlphaOfLengthBetween(1, 10); + } + position.put(randomAlphaOfLengthBetween(3, 10), value); + } + return position; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/graph/hlrc/GraphExploreResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/graph/hlrc/GraphExploreResponseTests.java new file mode 100644 index 00000000000..919f85f01d8 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/graph/hlrc/GraphExploreResponseTests.java @@ -0,0 +1,222 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.graph.hlrc; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.client.graph.Connection; +import org.elasticsearch.client.graph.GraphExploreResponse; +import org.elasticsearch.client.graph.Vertex; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.AbstractHlrcXContentTestCase; +import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.junit.Assert; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class GraphExploreResponseTests extends AbstractHlrcXContentTestCase< + org.elasticsearch.protocol.xpack.graph.GraphExploreResponse, + GraphExploreResponse> { + + static final Function VERTEX_ID_FUNCTION = + vId -> new org.elasticsearch.protocol.xpack.graph.Vertex.VertexId(vId.getField(), vId.getTerm()); + static final Function VERTEX_FUNCTION = + v -> new org.elasticsearch.protocol.xpack.graph.Vertex(v.getField(), v.getTerm(), v.getWeight(), + v.getHopDepth(), v.getBg(), v.getFg()); + + @Override + public GraphExploreResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return GraphExploreResponse.fromXContent(parser); + } + + @Override + public org.elasticsearch.protocol.xpack.graph.GraphExploreResponse convertHlrcToInternal(GraphExploreResponse instance) { + return new org.elasticsearch.protocol.xpack.graph.GraphExploreResponse(instance.getTookInMillis(), instance.isTimedOut(), + instance.getShardFailures(), convertVertices(instance), convertConnections(instance), instance.isReturnDetailedInfo()); + } + + public Map convertVertices( + GraphExploreResponse instance) { + final Collection vertexIds = instance.getVertexIds(); + final Map vertexMap = + new LinkedHashMap<>(vertexIds.size()); + + for (Vertex.VertexId vertexId : vertexIds) { + final Vertex vertex = instance.getVertex(vertexId); + + vertexMap.put(VERTEX_ID_FUNCTION.apply(vertexId), VERTEX_FUNCTION.apply(vertex)); + } + return vertexMap; + } + + public Map convertConnections(GraphExploreResponse instance) { + final Collection connectionIds = instance.getConnectionIds(); + final Map connectionMap= new LinkedHashMap<>(connectionIds.size()); + for (Connection.ConnectionId connectionId : connectionIds) { + final Connection connection = instance.getConnection(connectionId); + final ConnectionId connectionId1 = new ConnectionId(VERTEX_ID_FUNCTION.apply(connectionId.getSource()), + VERTEX_ID_FUNCTION.apply(connectionId.getTarget())); + final org.elasticsearch.protocol.xpack.graph.Connection connection1 = new org.elasticsearch.protocol.xpack.graph.Connection( + VERTEX_FUNCTION.apply(connection.getFrom()), VERTEX_FUNCTION.apply(connection.getTo()), connection.getWeight(), + connection.getDocCount()); + connectionMap.put(connectionId1, connection1); + } + return connectionMap; + } + + @Override + protected org.elasticsearch.protocol.xpack.graph.GraphExploreResponse createTestInstance() { + return createInstance(0); + } + + private static 
org.elasticsearch.protocol.xpack.graph.GraphExploreResponse createInstance(int numFailures) { + int numItems = randomIntBetween(4, 128); + boolean timedOut = randomBoolean(); + boolean showDetails = randomBoolean(); + long overallTookInMillis = randomNonNegativeLong(); + Map vertices = + new HashMap<>(); + Map connections = new HashMap<>(); + ShardOperationFailedException [] failures = new ShardOperationFailedException [numFailures]; + for (int i = 0; i < failures.length; i++) { + failures[i] = new ShardSearchFailure(new ElasticsearchException("an error")); + } + + //Create random set of vertices + for (int i = 0; i < numItems; i++) { + org.elasticsearch.protocol.xpack.graph.Vertex v = new org.elasticsearch.protocol.xpack.graph.Vertex("field1", + randomAlphaOfLength(5), randomDouble(), 0, + showDetails? randomIntBetween(100, 200):0, + showDetails? randomIntBetween(1, 100):0); + vertices.put(v.getId(), v); + } + + //Wire up half the vertices randomly + org.elasticsearch.protocol.xpack.graph.Vertex[] vs = + vertices.values().toArray(new org.elasticsearch.protocol.xpack.graph.Vertex[vertices.size()]); + for (int i = 0; i < numItems/2; i++) { + org.elasticsearch.protocol.xpack.graph.Vertex v1 = vs[randomIntBetween(0, vs.length-1)]; + org.elasticsearch.protocol.xpack.graph.Vertex v2 = vs[randomIntBetween(0, vs.length-1)]; + if(v1 != v2) { + org.elasticsearch.protocol.xpack.graph.Connection conn = new org.elasticsearch.protocol.xpack.graph.Connection(v1, v2, + randomDouble(), randomLongBetween(1, 10)); + connections.put(conn.getId(), conn); + } + } + return new org.elasticsearch.protocol.xpack.graph.GraphExploreResponse(overallTookInMillis, timedOut, failures, + vertices, connections, showDetails); + } + + + private static org.elasticsearch.protocol.xpack.graph.GraphExploreResponse createTestInstanceWithFailures() { + return createInstance(randomIntBetween(1, 128)); + } + + @Override + protected org.elasticsearch.protocol.xpack.graph.GraphExploreResponse doParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.protocol.xpack.graph.GraphExploreResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected boolean assertToXContentEquivalence() { + return false; + } + + @Override + protected String[] getShuffleFieldsExceptions() { + return new String[]{"vertices", "connections"}; + } + + protected Predicate getRandomFieldsExcludeFilterWhenResultHasErrors() { + return field -> field.startsWith("responses"); + } + + @Override + protected void assertEqualInstances(org.elasticsearch.protocol.xpack.graph.GraphExploreResponse expectedInstance, + org.elasticsearch.protocol.xpack.graph.GraphExploreResponse newInstance) { + Assert.assertThat(newInstance.getTook(), equalTo(expectedInstance.getTook())); + Assert.assertThat(newInstance.isTimedOut(), equalTo(expectedInstance.isTimedOut())); + + Comparator connComparator = + Comparator.comparing(o -> o.getId().toString()); + org.elasticsearch.protocol.xpack.graph.Connection[] newConns = + newInstance.getConnections().toArray(new org.elasticsearch.protocol.xpack.graph.Connection[0]); + org.elasticsearch.protocol.xpack.graph.Connection[] expectedConns = + expectedInstance.getConnections().toArray(new org.elasticsearch.protocol.xpack.graph.Connection[0]); + Arrays.sort(newConns, connComparator); + Arrays.sort(expectedConns, connComparator); + Assert.assertArrayEquals(expectedConns, newConns); + + //Sort the vertices lists before equality test (map insertion 
sequences can cause order differences) + Comparator comparator = Comparator.comparing(o -> o.getId().toString()); + org.elasticsearch.protocol.xpack.graph.Vertex[] newVertices = + newInstance.getVertices().toArray(new org.elasticsearch.protocol.xpack.graph.Vertex[0]); + org.elasticsearch.protocol.xpack.graph.Vertex[] expectedVertices = + expectedInstance.getVertices().toArray(new org.elasticsearch.protocol.xpack.graph.Vertex[0]); + Arrays.sort(newVertices, comparator); + Arrays.sort(expectedVertices, comparator); + Assert.assertArrayEquals(expectedVertices, newVertices); + + ShardOperationFailedException[] newFailures = newInstance.getShardFailures(); + ShardOperationFailedException[] expectedFailures = expectedInstance.getShardFailures(); + Assert.assertEquals(expectedFailures.length, newFailures.length); + + } + + /** + * Test parsing {@link org.elasticsearch.protocol.xpack.graph.GraphExploreResponse} with inner failures as they + * don't support asserting on xcontent equivalence, given exceptions are not parsed back as the same original class. + * We run the usual {@link AbstractXContentTestCase#testFromXContent()} without failures, and this other test with + * failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = + GraphExploreResponseTests::createTestInstanceWithFailures; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. + boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent( + AbstractXContentTestCase.NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, getShuffleFieldsExceptions(), + getRandomFieldsExcludeFilterWhenResultHasErrors(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/GetBasicStatusResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java similarity index 53% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/GetBasicStatusResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java index b746359d51a..fd2b6925888 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/GetBasicStatusResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java @@ -1,14 +1,25 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ -package org.elasticsearch.protocol.xpack.license; +package org.elasticsearch.client.license; - -import org.elasticsearch.client.license.GetBasicStatusResponse; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; +import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; public class GetBasicStatusResponseTests extends AbstractHlrcStreamableXContentTestCase { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/GetTrialStatusResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java similarity index 53% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/GetTrialStatusResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java index 504f5f83b51..36a38c40b8b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/GetTrialStatusResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java @@ -1,13 +1,25 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ -package org.elasticsearch.protocol.xpack.license; +package org.elasticsearch.client.license; -import org.elasticsearch.client.license.GetTrialStatusResponse; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; +import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; public class GetTrialStatusResponseTests extends AbstractHlrcStreamableXContentTestCase { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/LicenseStatusTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/LicenseStatusTests.java new file mode 100644 index 00000000000..06e90da8a87 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/LicenseStatusTests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.license; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class LicenseStatusTests extends ESTestCase { + + public void testCompatibility() { + final org.elasticsearch.protocol.xpack.license.LicenseStatus[] values = + org.elasticsearch.protocol.xpack.license.LicenseStatus.values(); + final LicenseStatus[] hlrcValues = LicenseStatus.values(); + + assertThat(values.length, equalTo(hlrcValues.length)); + + for (org.elasticsearch.protocol.xpack.license.LicenseStatus value : values) { + final LicenseStatus licenseStatus = LicenseStatus.fromString(value.label()); + assertThat(licenseStatus.label(), equalTo(value.label())); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/PutLicenseResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/PutLicenseResponseTests.java new file mode 100644 index 00000000000..efb7ed98b36 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/PutLicenseResponseTests.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
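The testCompatibility pattern above guards two enums that must stay in lock step across modules: same number of constants, and every server-side label must parse back to a client-side constant with the same label. A runnable sketch of the same check, with hypothetical ServerStatus/ClientStatus enums standing in for the two LicenseStatus classes:

    import java.util.Locale;

    public class EnumCompatibilityCheck {
        // Hypothetical stand-ins for the server-side and HLRC LicenseStatus enums.
        enum ServerStatus {
            ACTIVE, INVALID, EXPIRED;
            String label() { return name().toLowerCase(Locale.ROOT); }
        }

        enum ClientStatus {
            ACTIVE, INVALID, EXPIRED;
            String label() { return name().toLowerCase(Locale.ROOT); }
            static ClientStatus fromString(String s) { return valueOf(s.toUpperCase(Locale.ROOT)); }
        }

        public static void main(String[] args) {
            if (ServerStatus.values().length != ClientStatus.values().length) {
                throw new AssertionError("enums out of sync");
            }
            // Every server label must round-trip through the client enum unchanged.
            for (ServerStatus value : ServerStatus.values()) {
                ClientStatus parsed = ClientStatus.fromString(value.label());
                if (!parsed.label().equals(value.label())) {
                    throw new AssertionError("mismatch for " + value);
                }
            }
            System.out.println("enums are in sync");
        }
    }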
+ */ +package org.elasticsearch.client.license; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; +import org.elasticsearch.protocol.xpack.license.LicensesStatus; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; + +public class PutLicenseResponseTests extends AbstractHlrcStreamableXContentTestCase< + org.elasticsearch.protocol.xpack.license.PutLicenseResponse, org.elasticsearch.client.license.PutLicenseResponse> { + + @Override + public org.elasticsearch.client.license.PutLicenseResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.license.PutLicenseResponse.fromXContent(parser); + } + + @Override + public org.elasticsearch.protocol.xpack.license.PutLicenseResponse convertHlrcToInternal( + org.elasticsearch.client.license.PutLicenseResponse instance) { + return new org.elasticsearch.protocol.xpack.license.PutLicenseResponse(instance.isAcknowledged(), + org.elasticsearch.protocol.xpack.license.LicensesStatus.valueOf(instance.status().name()), + instance.acknowledgeHeader(), instance.acknowledgeMessages()); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate<String> getRandomFieldsExcludeFilter() { + // The structure of the response is such that unknown fields inside acknowledge cannot be supported since they + // are treated as messages from new services + return p -> p.startsWith("acknowledge"); + } + + @Override + protected org.elasticsearch.protocol.xpack.license.PutLicenseResponse createTestInstance() { + boolean acknowledged = randomBoolean(); + org.elasticsearch.protocol.xpack.license.LicensesStatus status = + randomFrom(org.elasticsearch.protocol.xpack.license.LicensesStatus.VALID, + org.elasticsearch.protocol.xpack.license.LicensesStatus.INVALID, + org.elasticsearch.protocol.xpack.license.LicensesStatus.EXPIRED); + String messageHeader; + Map<String, String[]> ackMessages; + if (randomBoolean()) { + messageHeader = randomAlphaOfLength(10); + ackMessages = randomAckMessages(); + } else { + messageHeader = null; + ackMessages = Collections.emptyMap(); + } + + return new org.elasticsearch.protocol.xpack.license.PutLicenseResponse(acknowledged, status, messageHeader, ackMessages); + } + + private static Map<String, String[]> randomAckMessages() { + int nFeatures = randomIntBetween(1, 5); + + Map<String, String[]> ackMessages = new HashMap<>(); + + for (int i = 0; i < nFeatures; i++) { + String feature = randomAlphaOfLengthBetween(9, 15); + int nMessages = randomIntBetween(1, 5); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = randomAlphaOfLengthBetween(10, 30); + } + ackMessages.put(feature, messages); + } + + return ackMessages; + } + + @Override + protected org.elasticsearch.protocol.xpack.license.PutLicenseResponse createBlankInstance() { + return new org.elasticsearch.protocol.xpack.license.PutLicenseResponse(); + } + + @Override + protected org.elasticsearch.protocol.xpack.license.PutLicenseResponse mutateInstance( + org.elasticsearch.protocol.xpack.license.PutLicenseResponse response) { + @SuppressWarnings("unchecked") + Function<org.elasticsearch.protocol.xpack.license.PutLicenseResponse, + org.elasticsearch.protocol.xpack.license.PutLicenseResponse> mutator = randomFrom( + r -> new org.elasticsearch.protocol.xpack.license.PutLicenseResponse( + r.isAcknowledged() == false, + r.status(), + r.acknowledgeHeader(), + r.acknowledgeMessages()), + r -> new
org.elasticsearch.protocol.xpack.license.PutLicenseResponse( + r.isAcknowledged(), + mutateStatus(r.status()), + r.acknowledgeHeader(), + r.acknowledgeMessages()), + r -> { + if (r.acknowledgeMessages().isEmpty()) { + return new org.elasticsearch.protocol.xpack.license.PutLicenseResponse( + r.isAcknowledged(), + r.status(), + randomAlphaOfLength(10), + randomAckMessages() + ); + } else { + return new org.elasticsearch.protocol.xpack.license.PutLicenseResponse(r.isAcknowledged(), r.status()); + } + } + + ); + return mutator.apply(response); + } + + private org.elasticsearch.protocol.xpack.license.LicensesStatus mutateStatus( + org.elasticsearch.protocol.xpack.license.LicensesStatus status) { + return randomValueOtherThan(status, () -> randomFrom(LicensesStatus.values())); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/StartBasicResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/StartBasicResponseTests.java similarity index 76% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/StartBasicResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/license/StartBasicResponseTests.java index 8f8a40f0894..69f831cff18 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/StartBasicResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/StartBasicResponseTests.java @@ -1,14 +1,27 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
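mutateInstance, just above, feeds the framework's equals/hashCode checks: it must return an instance that differs from its input in exactly one randomly chosen field. A compact sketch of that contract under assumed types (Resp is a hypothetical two-field response using Java 16+ records, not the real PutLicenseResponse):

    import java.util.List;
    import java.util.Random;
    import java.util.function.Function;

    public class MutateInstanceSketch {
        // Hypothetical immutable response with two fields.
        record Resp(boolean acknowledged, String status) {}

        public static void main(String[] args) {
            Resp original = new Resp(true, "valid");

            // Each mutator flips exactly one field, so the output can never
            // equal the input -- the property mutateInstance must uphold.
            List<Function<Resp, Resp>> mutators = List.of(
                r -> new Resp(r.acknowledged() == false, r.status()),
                r -> new Resp(r.acknowledged(), "valid".equals(r.status()) ? "invalid" : "valid"));

            Resp mutated = mutators.get(new Random().nextInt(mutators.size())).apply(original);
            System.out.println(original.equals(mutated)); // always false
        }
    }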
*/ -package org.elasticsearch.protocol.xpack.license; +package org.elasticsearch.client.license; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.license.PostStartBasicResponse; -import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; +import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; import java.io.IOException; import java.util.Collections; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/MlInfoActionResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java similarity index 57% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/MlInfoActionResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java index fe59810b595..2ffb7744a3b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/MlInfoActionResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java @@ -1,13 +1,25 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ -package org.elasticsearch.xpack.core.ml.action; +package org.elasticsearch.client.ml; -import org.elasticsearch.client.ml.MlInfoResponse; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; +import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; import org.elasticsearch.xpack.core.ml.action.MlInfoAction.Response; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java new file mode 100644 index 00000000000..fd2b019c613 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
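The renamed tests above, and those that follow, all share the same HLRC round-trip shape: parse the wire format with the client parser, convert the result back to the server-side class via convertHlrcToInternal, and compare it against a direct server-side parse. A toy, dependency-free sketch of that flow (Internal, Hlrc, and the parsers here are hypothetical stand-ins, not the real classes):

    public class HlrcRoundTripSketch {
        // Hypothetical server-side ("internal") and client-side ("HLRC") views of one wire format.
        record Internal(String id) {}
        record Hlrc(String id) {}

        static Internal parseAsInternal(String wire) { return new Internal(wire.trim()); } // stand-in parser
        static Hlrc parseAsHlrc(String wire) { return new Hlrc(wire.trim()); }             // stand-in parser

        // The convertHlrcToInternal step: map the client view onto the server
        // class so the two parse results can be compared directly.
        static Internal convertHlrcToInternal(Hlrc hlrc) { return new Internal(hlrc.id()); }

        public static void main(String[] args) {
            String wire = " calendar-1 ";
            Internal direct = parseAsInternal(wire);
            Internal viaHlrc = convertHlrcToInternal(parseAsHlrc(wire));
            System.out.println(direct.equals(viaHlrc)); // true when both views agree
        }
    }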
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.PutCalendarAction; +import org.elasticsearch.xpack.core.ml.calendars.Calendar; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class PutCalendarActionResponseTests + extends AbstractHlrcStreamableXContentTestCase<PutCalendarAction.Response, PutCalendarResponse> { + + @Override + protected PutCalendarAction.Response createTestInstance() { + return new PutCalendarAction.Response(testInstance()); + } + + @Override + protected PutCalendarAction.Response doParseInstance(XContentParser parser) throws IOException { + return new PutCalendarAction.Response(Calendar.LENIENT_PARSER.parse(parser, null).build()); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + public PutCalendarResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return PutCalendarResponse.fromXContent(parser); + } + + @Override + public PutCalendarAction.Response convertHlrcToInternal(PutCalendarResponse instance) { + org.elasticsearch.client.ml.calendars.Calendar hlrcCalendar = instance.getCalendar(); + Calendar internalCalendar = new Calendar(hlrcCalendar.getId(), hlrcCalendar.getJobIds(), hlrcCalendar.getDescription()); + return new PutCalendarAction.Response(internalCalendar); + } + + @Override + protected PutCalendarAction.Response createBlankInstance() { + return new PutCalendarAction.Response(); + } + + public static Calendar testInstance() { + return testInstance(new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()).ofCodePointsLength(random(), 10, 10)); + } + + public static Calendar testInstance(String calendarId) { + int size = randomInt(10); + List<String> items = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + items.add(randomAlphaOfLengthBetween(1, 20)); + } + String description = null; + if (randomBoolean()) { + description = randomAlphaOfLength(20); + } + return new Calendar(calendarId, items, description); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java similarity index 54% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java index a605917f01c..bb748a71f42 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java @@ -1,12 +1,27 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ -package org.elasticsearch.xpack.core.security.action.user; +package org.elasticsearch.client.security.hlrc; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; +import org.elasticsearch.client.security.HasPrivilegesResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -16,10 +31,11 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; +import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges; import org.hamcrest.Matchers; +import org.junit.Assert; import java.io.IOException; import java.util.ArrayList; @@ -35,32 +51,36 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; -public class HasPrivilegesResponseTests - extends AbstractHlrcStreamableXContentTestCase { +public class HasPrivilegesResponseTests extends AbstractHlrcStreamableXContentTestCase< + org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse, + HasPrivilegesResponse> { public void testSerializationV64OrV65() throws IOException { - final HasPrivilegesResponse original = randomResponse(); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.V_6_5_1); - final HasPrivilegesResponse copy = serializeAndDeserialize(original, version); + final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse original = randomResponse(); + final Version version = VersionUtils.randomVersionBetween(LuceneTestCase.random(), Version.V_6_4_0, Version.V_6_5_1); + final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse copy = serializeAndDeserialize(original, version); - assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch())); - assertThat(copy.getClusterPrivileges().entrySet(), Matchers.emptyIterable()); - assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges())); - assertThat(copy.getApplicationPrivileges(), equalTo(original.getApplicationPrivileges())); + Assert.assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch())); + Assert.assertThat(copy.getClusterPrivileges().entrySet(), 
Matchers.emptyIterable()); + Assert.assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges())); + Assert.assertThat(copy.getApplicationPrivileges(), equalTo(original.getApplicationPrivileges())); } public void testSerializationV63() throws IOException { - final HasPrivilegesResponse original = randomResponse(); - final HasPrivilegesResponse copy = serializeAndDeserialize(original, Version.V_6_3_0); + final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse original = randomResponse(); + final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse copy = + serializeAndDeserialize(original, Version.V_6_3_0); - assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch())); - assertThat(copy.getClusterPrivileges().entrySet(), Matchers.emptyIterable()); - assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges())); - assertThat(copy.getApplicationPrivileges(), equalTo(Collections.emptyMap())); + Assert.assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch())); + Assert.assertThat(copy.getClusterPrivileges().entrySet(), Matchers.emptyIterable()); + Assert.assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges())); + Assert.assertThat(copy.getApplicationPrivileges(), equalTo(Collections.emptyMap())); } public void testToXContent() throws Exception { - final HasPrivilegesResponse response = new HasPrivilegesResponse("daredevil", false, Collections.singletonMap("manage", true), + final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse response = + new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse("daredevil", + false, Collections.singletonMap("manage", true), Arrays.asList( ResourcePrivileges.builder("staff") .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap<>()).put("read", true) @@ -77,7 +97,7 @@ public class HasPrivilegesResponseTests BytesReference bytes = BytesReference.bytes(builder); final String json = bytes.utf8ToString(); - assertThat(json, equalTo("{" + + Assert.assertThat(json, equalTo("{" + "\"username\":\"daredevil\"," + "\"has_all_requested\":false," + "\"cluster\":{\"manage\":true}," + @@ -96,23 +116,23 @@ public class HasPrivilegesResponseTests } @Override - protected HasPrivilegesResponse createBlankInstance() { - return new HasPrivilegesResponse(); + protected org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse createBlankInstance() { + return new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse(); } @Override - protected HasPrivilegesResponse createTestInstance() { + protected org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse createTestInstance() { return randomResponse(); } @Override - public org.elasticsearch.client.security.HasPrivilegesResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.security.HasPrivilegesResponse.fromXContent(parser); + public HasPrivilegesResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return HasPrivilegesResponse.fromXContent(parser); } @Override - public HasPrivilegesResponse convertHlrcToInternal(org.elasticsearch.client.security.HasPrivilegesResponse hlrc) { - return new HasPrivilegesResponse( + public org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse convertHlrcToInternal(HasPrivilegesResponse hlrc) { + return new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse( hlrc.getUsername(), 
hlrc.hasAllRequested(), hlrc.getClusterPrivileges(), @@ -128,21 +148,23 @@ public class HasPrivilegesResponseTests .collect(Collectors.toList()); } - private HasPrivilegesResponse serializeAndDeserialize(HasPrivilegesResponse original, Version version) throws IOException { + private org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse serializeAndDeserialize( + org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse original, Version version) throws IOException { logger.info("Test serialize/deserialize with version {}", version); final BytesStreamOutput out = new BytesStreamOutput(); out.setVersion(version); original.writeTo(out); - final HasPrivilegesResponse copy = new HasPrivilegesResponse(); + final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse copy = + new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse(); final StreamInput in = out.bytes().streamInput(); in.setVersion(version); copy.readFrom(in); - assertThat(in.read(), equalTo(-1)); + Assert.assertThat(in.read(), equalTo(-1)); return copy; } - private HasPrivilegesResponse randomResponse() { + private org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse randomResponse() { final String username = randomAlphaOfLengthBetween(4, 12); final Map<String, Boolean> cluster = new HashMap<>(); for (String priv : randomArray(1, 6, String[]::new, () -> randomAlphaOfLengthBetween(3, 12))) { @@ -150,16 +172,19 @@ public class HasPrivilegesResponseTests } final Collection<ResourcePrivileges> index = randomResourcePrivileges(); final Map<String, Collection<ResourcePrivileges>> application = new HashMap<>(); - for (String app : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 6).toLowerCase(Locale.ROOT))) { + for (String app : randomArray(1, 3, String[]::new, + () -> randomAlphaOfLengthBetween(3, 6).toLowerCase(Locale.ROOT))) { application.put(app, randomResourcePrivileges()); } - return new HasPrivilegesResponse(username, randomBoolean(), cluster, index, application); + return new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse(username, randomBoolean(), + cluster, index, application); } private Collection<ResourcePrivileges> randomResourcePrivileges() { final Collection<ResourcePrivileges> list = new ArrayList<>(); // Use hash set to force a unique set of resources - for (String resource : Sets.newHashSet(randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(2, 6)))) { + for (String resource : Sets.newHashSet(randomArray(1, 3, String[]::new, + () -> randomAlphaOfLengthBetween(2, 6)))) { final Map<String, Boolean> privileges = new HashMap<>(); for (String priv : randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 8))) { privileges.put(priv, randomBoolean()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/GetWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java similarity index 90% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/GetWatchResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java index 9b71079a6e5..9ab115371a0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/GetWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java @@ -1,10 +1,24 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements.
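serializeAndDeserialize above is the standard wire-compatibility round trip: pin the output stream to the target node version, write, pin the input stream to the same version before reading back, and finally assert the stream was fully consumed. A sketch of that helper in isolation, assuming the Elasticsearch test classpath; the classes and calls mirror the test above:

    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse;

    import java.io.IOException;

    public class VersionedRoundTrip {
        static HasPrivilegesResponse roundTrip(HasPrivilegesResponse original, Version version) throws IOException {
            BytesStreamOutput out = new BytesStreamOutput();
            out.setVersion(version); // write with the wire format of the target version
            original.writeTo(out);

            HasPrivilegesResponse copy = new HasPrivilegesResponse();
            StreamInput in = out.bytes().streamInput();
            in.setVersion(version); // read with the same wire format
            copy.readFrom(in);

            if (in.read() != -1) { // the response must consume the whole stream
                throw new IllegalStateException("unread bytes left on the stream");
            }
            return copy;
        }
    }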
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ -package org.elasticsearch.protocol.xpack.watcher; +package org.elasticsearch.client.watcher; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -14,7 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; +import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; @@ -90,7 +104,7 @@ public class GetWatchResponseTests extends @Override protected GetWatchResponse createTestInstance() { String id = randomAlphaOfLength(10); - if (rarely()) { + if (LuceneTestCase.rarely()) { return new GetWatchResponse(id); } long version = randomLongBetween(0, 10); @@ -128,8 +142,8 @@ public class GetWatchResponseTests extends long version = randomLongBetween(-1, Long.MAX_VALUE); WatchStatus.State state = new WatchStatus.State(randomBoolean(), nowWithMillisResolution()); ExecutionState executionState = randomFrom(ExecutionState.values()); - ZonedDateTime lastChecked = rarely() ? null : nowWithMillisResolution(); - ZonedDateTime lastMetCondition = rarely() ? null : nowWithMillisResolution(); + ZonedDateTime lastChecked = LuceneTestCase.rarely() ? null : nowWithMillisResolution(); + ZonedDateTime lastMetCondition = LuceneTestCase.rarely() ? null : nowWithMillisResolution(); int size = randomIntBetween(0, 5); Map actionMap = new HashMap<>(); for (int i = 0; i < size; i++) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java new file mode 100644 index 00000000000..eebf2c9cef1 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.watcher.hlrc; + +import org.elasticsearch.client.watcher.DeleteWatchResponse; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.AbstractHlrcXContentTestCase; + +import java.io.IOException; + +public class DeleteWatchResponseTests extends AbstractHlrcXContentTestCase< + org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse, DeleteWatchResponse> { + + @Override + protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse createTestInstance() { + String id = randomAlphaOfLength(10); + long version = randomLongBetween(1, 10); + boolean found = randomBoolean(); + return new org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse(id, version, found); + } + + @Override + protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse doParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse.fromXContent(parser); + } + + @Override + public DeleteWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return DeleteWatchResponse.fromXContent(parser); + } + + @Override + public org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse convertHlrcToInternal(DeleteWatchResponse instance) { + return new org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse(instance.getId(), instance.getVersion(), + instance.isFound()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/ExecuteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java similarity index 71% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/ExecuteWatchResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java index 47fd4a33dc9..ace75517a93 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/ExecuteWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java @@ -1,17 +1,30 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ -package org.elasticsearch.protocol.xpack.watcher; +package org.elasticsearch.client.watcher.hlrc; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; +import org.elasticsearch.client.AbstractHlrcXContentTestCase; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java new file mode 100644 index 00000000000..9b65618cafc --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.watcher.hlrc; + +import org.elasticsearch.client.watcher.PutWatchResponse; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.AbstractHlrcXContentTestCase; + +import java.io.IOException; + +public class PutWatchResponseTests extends AbstractHlrcXContentTestCase< + org.elasticsearch.protocol.xpack.watcher.PutWatchResponse, PutWatchResponse> { + + @Override + protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createTestInstance() { + String id = randomAlphaOfLength(10); + long seqNo = randomNonNegativeLong(); + long primaryTerm = randomLongBetween(1, 20); + long version = randomLongBetween(1, 10); + boolean created = randomBoolean(); + return new org.elasticsearch.protocol.xpack.watcher.PutWatchResponse(id, version, seqNo, primaryTerm, created); + } + + @Override + protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse doParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.protocol.xpack.watcher.PutWatchResponse.fromXContent(parser); + } + + @Override + public PutWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.watcher.PutWatchResponse.fromXContent(parser); + } + + @Override + public org.elasticsearch.protocol.xpack.watcher.PutWatchResponse convertHlrcToInternal(PutWatchResponse instance) { + return new org.elasticsearch.protocol.xpack.watcher.PutWatchResponse(instance.getId(), instance.getVersion(), + instance.getSeqNo(), instance.getPrimaryTerm(), instance.isCreated()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index 337bd26f37d..a25c53506fb 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -46,11 +46,11 @@ dependencies { testCompile "org.slf4j:slf4j-api:${versions.slf4j}" testCompile project(path: ':modules:reindex', configuration: 'runtime') testCompile project(path: ':modules:parent-join', configuration: 'runtime') + testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') testCompile project(path: ':modules:analysis-common', configuration: 'runtime') testCompile(project(':x-pack:license-tools')) { transitive = false } - testCompile ("org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}") } ext.expansions = [ diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java deleted file mode 100644 index 3235f62b3e4..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.protocol.xpack.graph; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ShardOperationFailedException; -import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; -import org.elasticsearch.test.AbstractXContentTestCase; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Comparator; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.function.Supplier; - -import static org.hamcrest.Matchers.equalTo; - -public class GraphExploreResponseTests extends - AbstractHlrcXContentTestCase { - - static final Function VERTEX_ID_FUNCTION = - vId -> new Vertex.VertexId(vId.getField(), vId.getTerm()); - static final Function VERTEX_FUNCTION = - v -> new Vertex(v.getField(), v.getTerm(), v.getWeight(), v.getHopDepth(), v.getBg(), v.getFg()); - - @Override - public org.elasticsearch.client.graph.GraphExploreResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.graph.GraphExploreResponse.fromXContent(parser); - } - - @Override - public GraphExploreResponse convertHlrcToInternal(org.elasticsearch.client.graph.GraphExploreResponse instance) { - return new GraphExploreResponse(instance.getTookInMillis(), instance.isTimedOut(), - instance.getShardFailures(), convertVertices(instance), convertConnections(instance), instance.isReturnDetailedInfo()); - } - - public Map convertVertices(org.elasticsearch.client.graph.GraphExploreResponse instance) { - final Collection vertexIds = instance.getVertexIds(); - final Map vertexMap = new LinkedHashMap<>(vertexIds.size()); - - for (org.elasticsearch.client.graph.Vertex.VertexId vertexId : vertexIds) { - final org.elasticsearch.client.graph.Vertex vertex = instance.getVertex(vertexId); - - vertexMap.put(VERTEX_ID_FUNCTION.apply(vertexId), VERTEX_FUNCTION.apply(vertex)); - } - return vertexMap; - } - - public Map convertConnections(org.elasticsearch.client.graph.GraphExploreResponse instance) { - final Collection connectionIds = instance.getConnectionIds(); - final Map connectionMap = new LinkedHashMap<>(connectionIds.size()); - for (org.elasticsearch.client.graph.Connection.ConnectionId connectionId : connectionIds) { - final org.elasticsearch.client.graph.Connection connection = instance.getConnection(connectionId); - final Connection.ConnectionId connectionId1 = - new Connection.ConnectionId(VERTEX_ID_FUNCTION.apply(connectionId.getSource()), - VERTEX_ID_FUNCTION.apply(connectionId.getTarget())); - final Connection connection1 = new Connection(VERTEX_FUNCTION.apply(connection.getFrom()), - VERTEX_FUNCTION.apply(connection.getTo()), - connection.getWeight(), connection.getDocCount()); - connectionMap.put(connectionId1, connection1); - } - return connectionMap; - } - - @Override - protected GraphExploreResponse createTestInstance() { - return createInstance(0); - } - - private static GraphExploreResponse createInstance(int numFailures) { - int numItems = randomIntBetween(4, 128); - boolean timedOut = randomBoolean(); - boolean showDetails = randomBoolean(); - long overallTookInMillis = randomNonNegativeLong(); - Map vertices = new HashMap<>(); - Map connections = new HashMap<>(); - 
ShardOperationFailedException [] failures = new ShardOperationFailedException [numFailures]; - for (int i = 0; i < failures.length; i++) { - failures[i] = new ShardSearchFailure(new ElasticsearchException("an error")); - } - - //Create random set of vertices - for (int i = 0; i < numItems; i++) { - Vertex v = new Vertex("field1", randomAlphaOfLength(5), randomDouble(), 0, - showDetails?randomIntBetween(100, 200):0, - showDetails?randomIntBetween(1, 100):0); - vertices.put(v.getId(), v); - } - - //Wire up half the vertices randomly - Vertex[] vs = vertices.values().toArray(new Vertex[vertices.size()]); - for (int i = 0; i < numItems/2; i++) { - Vertex v1 = vs[randomIntBetween(0, vs.length-1)]; - Vertex v2 = vs[randomIntBetween(0, vs.length-1)]; - if(v1 != v2) { - Connection conn = new Connection(v1, v2, randomDouble(), randomLongBetween(1, 10)); - connections.put(conn.getId(), conn); - } - } - return new GraphExploreResponse(overallTookInMillis, timedOut, failures, vertices, connections, showDetails); - } - - - private static GraphExploreResponse createTestInstanceWithFailures() { - return createInstance(randomIntBetween(1, 128)); - } - - @Override - protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException { - return GraphExploreResponse.fromXContent(parser); - } - - @Override - protected boolean supportsUnknownFields() { - return true; - } - - @Override - protected boolean assertToXContentEquivalence() { - return false; - } - - @Override - protected String[] getShuffleFieldsExceptions() { - return new String[]{"vertices", "connections"}; - } - - protected Predicate getRandomFieldsExcludeFilterWhenResultHasErrors() { - return field -> field.startsWith("responses"); - } - - @Override - protected void assertEqualInstances( GraphExploreResponse expectedInstance, GraphExploreResponse newInstance) { - assertThat(newInstance.getTook(), equalTo(expectedInstance.getTook())); - assertThat(newInstance.isTimedOut(), equalTo(expectedInstance.isTimedOut())); - - Comparator connComparator = new Comparator() { - @Override - public int compare(Connection o1, Connection o2) { - return o1.getId().toString().compareTo(o2.getId().toString()); - } - }; - Connection[] newConns = newInstance.getConnections().toArray(new Connection[0]); - Connection[] expectedConns = expectedInstance.getConnections().toArray(new Connection[0]); - Arrays.sort(newConns, connComparator); - Arrays.sort(expectedConns, connComparator); - assertArrayEquals(expectedConns, newConns); - - //Sort the vertices lists before equality test (map insertion sequences can cause order differences) - Comparator comparator = new Comparator() { - @Override - public int compare(Vertex o1, Vertex o2) { - return o1.getId().toString().compareTo(o2.getId().toString()); - } - }; - Vertex[] newVertices = newInstance.getVertices().toArray(new Vertex[0]); - Vertex[] expectedVertices = expectedInstance.getVertices().toArray(new Vertex[0]); - Arrays.sort(newVertices, comparator); - Arrays.sort(expectedVertices, comparator); - assertArrayEquals(expectedVertices, newVertices); - - ShardOperationFailedException[] newFailures = newInstance.getShardFailures(); - ShardOperationFailedException[] expectedFailures = expectedInstance.getShardFailures(); - assertEquals(expectedFailures.length, newFailures.length); - - } - - /** - * Test parsing {@link GraphExploreResponse} with inner failures as they don't support asserting on xcontent equivalence, given - * exceptions are not parsed back as the same original class. 
We run the usual {@link AbstractXContentTestCase#testFromXContent()} - * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. - */ - public void testFromXContentWithFailures() throws IOException { - Supplier< GraphExploreResponse> instanceSupplier = GraphExploreResponseTests::createTestInstanceWithFailures; - //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, - //but that does not bother our assertions, as we only want to test that we don't break. - boolean supportsUnknownFields = true; - //exceptions are not of the same type whenever parsed back - boolean assertToXContentEquivalence = false; - AbstractXContentTestCase.testFromXContent( - NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, getShuffleFieldsExceptions(), - getRandomFieldsExcludeFilterWhenResultHasErrors(), this::createParser, this::doParseInstance, - this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); - } - -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java index 0b73850c5e6..a0649d1e7f4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java @@ -9,25 +9,10 @@ import java.io.IOException; import org.elasticsearch.test.ESTestCase; -import static org.hamcrest.CoreMatchers.equalTo; - public class LicenseStatusTests extends ESTestCase { public void testSerialization() throws IOException { LicenseStatus status = randomFrom(LicenseStatus.values()); assertSame(status, copyWriteable(status, writableRegistry(), LicenseStatus::readFrom)); } - public void testCompatibility() { - final LicenseStatus[] values = LicenseStatus.values(); - final org.elasticsearch.client.license.LicenseStatus[] hlrcValues = - org.elasticsearch.client.license.LicenseStatus.values(); - - assertThat(values.length, equalTo(hlrcValues.length)); - - for (LicenseStatus value : values) { - final org.elasticsearch.client.license.LicenseStatus licenseStatus = - org.elasticsearch.client.license.LicenseStatus.fromString(value.label()); - assertThat(licenseStatus.label(), equalTo(value.label())); - } - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java deleted file mode 100644 index 4480d925bed..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.protocol.xpack.license; - -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; -import java.util.function.Predicate; - -public class PutLicenseResponseTests extends - AbstractHlrcStreamableXContentTestCase { - - @Override - public org.elasticsearch.client.license.PutLicenseResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.license.PutLicenseResponse.fromXContent(parser); - } - - @Override - public PutLicenseResponse convertHlrcToInternal(org.elasticsearch.client.license.PutLicenseResponse instance) { - return new PutLicenseResponse(instance.isAcknowledged(), LicensesStatus.valueOf(instance.status().name()), - instance.acknowledgeHeader(), instance.acknowledgeMessages()); - } - - @Override - protected boolean supportsUnknownFields() { - return true; - } - - @Override - protected Predicate getRandomFieldsExcludeFilter() { - // The structure of the response is such that unknown fields inside acknowledge cannot be supported since they - // are treated as messages from new services - return p -> p.startsWith("acknowledge"); - } - - @Override - protected PutLicenseResponse createTestInstance() { - boolean acknowledged = randomBoolean(); - LicensesStatus status = randomFrom(LicensesStatus.VALID, LicensesStatus.INVALID, LicensesStatus.EXPIRED); - String messageHeader; - Map ackMessages; - if (randomBoolean()) { - messageHeader = randomAlphaOfLength(10); - ackMessages = randomAckMessages(); - } else { - messageHeader = null; - ackMessages = Collections.emptyMap(); - } - - return new PutLicenseResponse(acknowledged, status, messageHeader, ackMessages); - } - - private static Map randomAckMessages() { - int nFeatures = randomIntBetween(1, 5); - - Map ackMessages = new HashMap<>(); - - for (int i = 0; i < nFeatures; i++) { - String feature = randomAlphaOfLengthBetween(9, 15); - int nMessages = randomIntBetween(1, 5); - String[] messages = new String[nMessages]; - for (int j = 0; j < nMessages; j++) { - messages[j] = randomAlphaOfLengthBetween(10, 30); - } - ackMessages.put(feature, messages); - } - - return ackMessages; - } - - @Override - protected PutLicenseResponse createBlankInstance() { - return new PutLicenseResponse(); - } - - @Override - protected PutLicenseResponse mutateInstance(PutLicenseResponse response) { - @SuppressWarnings("unchecked") - Function mutator = randomFrom( - r -> new PutLicenseResponse( - r.isAcknowledged() == false, - r.status(), - r.acknowledgeHeader(), - r.acknowledgeMessages()), - r -> new PutLicenseResponse( - r.isAcknowledged(), - mutateStatus(r.status()), - r.acknowledgeHeader(), - r.acknowledgeMessages()), - r -> { - if (r.acknowledgeMessages().isEmpty()) { - return new PutLicenseResponse( - r.isAcknowledged(), - r.status(), - randomAlphaOfLength(10), - randomAckMessages() - ); - } else { - return new PutLicenseResponse(r.isAcknowledged(), r.status()); - } - } - - ); - return mutator.apply(response); - } - - private LicensesStatus mutateStatus(LicensesStatus status) { - return randomValueOtherThan(status, () -> randomFrom(LicensesStatus.values())); - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java deleted file mode 100644 index 7486252f538..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.protocol.xpack.watcher; - -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; - -import java.io.IOException; - -public class DeleteWatchResponseTests extends - AbstractHlrcXContentTestCase { - - @Override - protected DeleteWatchResponse createTestInstance() { - String id = randomAlphaOfLength(10); - long version = randomLongBetween(1, 10); - boolean found = randomBoolean(); - return new DeleteWatchResponse(id, version, found); - } - - @Override - protected DeleteWatchResponse doParseInstance(XContentParser parser) throws IOException { - return DeleteWatchResponse.fromXContent(parser); - } - - @Override - public org.elasticsearch.client.watcher.DeleteWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.watcher.DeleteWatchResponse.fromXContent(parser); - } - - @Override - public DeleteWatchResponse convertHlrcToInternal(org.elasticsearch.client.watcher.DeleteWatchResponse instance) { - return new DeleteWatchResponse(instance.getId(), instance.getVersion(), instance.isFound()); - } - - @Override - protected boolean supportsUnknownFields() { - return false; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java deleted file mode 100644 index 975d842b795..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.protocol.xpack.watcher; - -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; - -import java.io.IOException; - -public class PutWatchResponseTests extends - AbstractHlrcXContentTestCase { - - @Override - protected PutWatchResponse createTestInstance() { - String id = randomAlphaOfLength(10); - long seqNo = randomNonNegativeLong(); - long primaryTerm = randomLongBetween(1, 20); - long version = randomLongBetween(1, 10); - boolean created = randomBoolean(); - return new PutWatchResponse(id, version, seqNo, primaryTerm, created); - } - - @Override - protected PutWatchResponse doParseInstance(XContentParser parser) throws IOException { - return PutWatchResponse.fromXContent(parser); - } - - @Override - public org.elasticsearch.client.watcher.PutWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.watcher.PutWatchResponse.fromXContent(parser); - } - - @Override - public PutWatchResponse convertHlrcToInternal(org.elasticsearch.client.watcher.PutWatchResponse instance) { - return new PutWatchResponse(instance.getId(), instance.getVersion(), instance.getSeqNo(), instance.getPrimaryTerm(), - instance.isCreated()); - } - - @Override - protected boolean supportsUnknownFields() { - return false; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateHlrcTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateHlrcTests.java deleted file mode 100644 index 5b2fa6ec33d..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/hlrc/DataFrameTransformStateHlrcTests.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.core.dataframe.transforms.hlrc; - -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateTests; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; -import org.elasticsearch.xpack.core.indexing.IndexerState; - -import java.io.IOException; -import java.util.function.Predicate; - -public class DataFrameTransformStateHlrcTests extends AbstractHlrcXContentTestCase { - - public static DataFrameTransformState fromHlrc(org.elasticsearch.client.dataframe.transforms.DataFrameTransformState instance) { - return new DataFrameTransformState(DataFrameTransformTaskState.fromString(instance.getTaskState().value()), - IndexerState.fromString(instance.getIndexerState().value()), instance.getPosition(), instance.getCheckpoint(), - instance.getReason()); - } - - @Override - public org.elasticsearch.client.dataframe.transforms.DataFrameTransformState doHlrcParseInstance(XContentParser parser) - throws IOException { - return org.elasticsearch.client.dataframe.transforms.DataFrameTransformState.fromXContent(parser); - } - - @Override - public DataFrameTransformState convertHlrcToInternal(org.elasticsearch.client.dataframe.transforms.DataFrameTransformState instance) { - return fromHlrc(instance); - } - - @Override - protected DataFrameTransformState createTestInstance() { - return DataFrameTransformStateTests.randomDataFrameTransformState(); - } - - @Override - protected DataFrameTransformState doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransformState.fromXContent(parser); - } - - @Override - protected boolean supportsUnknownFields() { - return true; - } - - @Override - protected Predicate getRandomFieldsExcludeFilter() { - return field -> field.equals("current_position"); - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java deleted file mode 100644 index 7f486698231..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.core.ml.action; - -import org.elasticsearch.client.ml.PutCalendarResponse; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; -import org.elasticsearch.xpack.core.ml.calendars.Calendar; -import org.elasticsearch.xpack.core.ml.calendars.CalendarTests; - -import java.io.IOException; - -public class PutCalendarActionResponseTests - extends AbstractHlrcStreamableXContentTestCase { - - @Override - protected PutCalendarAction.Response createTestInstance() { - return new PutCalendarAction.Response(CalendarTests.testInstance()); - } - - @Override - protected PutCalendarAction.Response doParseInstance(XContentParser parser) throws IOException { - return new PutCalendarAction.Response(Calendar.LENIENT_PARSER.parse(parser, null).build()); - } - - @Override - protected boolean supportsUnknownFields() { - return true; - } - - @Override - public PutCalendarResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return PutCalendarResponse.fromXContent(parser); - } - - @Override - public PutCalendarAction.Response convertHlrcToInternal(PutCalendarResponse instance) { - org.elasticsearch.client.ml.calendars.Calendar hlrcCalendar = instance.getCalendar(); - Calendar internalCalendar = new Calendar(hlrcCalendar.getId(), hlrcCalendar.getJobIds(), hlrcCalendar.getDescription()); - return new PutCalendarAction.Response(internalCalendar); - } - - @Override - protected PutCalendarAction.Response createBlankInstance() { - return new PutCalendarAction.Response(); - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java index 56807d23913..121f40c44d9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.client.security.user.privileges.Role.ClusterPrivilegeName; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -66,7 +65,7 @@ public class LimitedRoleTests extends ESTestCase { .putAlias(AliasMetaData.builder("_alias1")); MetaData md = MetaData.builder().put(imbBuilder).put(imbBuilder1).build(); FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); - Role fromRole = Role.builder("a-role").cluster(Collections.singleton(ClusterPrivilegeName.MANAGE_SECURITY), Collections.emptyList()) + Role fromRole = Role.builder("a-role").cluster(Collections.singleton("manage_security"), Collections.emptyList()) .add(IndexPrivilege.ALL, "_index").add(IndexPrivilege.CREATE_INDEX, "_index1").build(); IndicesAccessControl iac = fromRole.authorize(SearchAction.NAME, Sets.newHashSet("_index", "_alias1"), md.getAliasAndIndexLookup(), @@ -84,7 +83,7 @@ public class LimitedRoleTests extends ESTestCase { { Role limitedByRole = Role.builder("limited-role") - .cluster(Collections.singleton(ClusterPrivilegeName.ALL), 
Collections.emptyList()).add(IndexPrivilege.READ, "_index") + .cluster(Collections.singleton("all"), Collections.emptyList()).add(IndexPrivilege.READ, "_index") .add(IndexPrivilege.NONE, "_index1").build(); iac = limitedByRole.authorize(SearchAction.NAME, Sets.newHashSet("_index", "_alias1"), md.getAliasAndIndexLookup(), fieldPermissionsCache); @@ -128,12 +127,12 @@ public class LimitedRoleTests extends ESTestCase { } public void testCheckClusterAction() { - Role fromRole = Role.builder("a-role").cluster(Collections.singleton(ClusterPrivilegeName.MANAGE_SECURITY), Collections.emptyList()) + Role fromRole = Role.builder("a-role").cluster(Collections.singleton("manage_security"), Collections.emptyList()) .build(); assertThat(fromRole.checkClusterAction("cluster:admin/xpack/security/x", mock(TransportRequest.class)), is(true)); { Role limitedByRole = Role.builder("limited-role") - .cluster(Collections.singleton(ClusterPrivilegeName.ALL), Collections.emptyList()).build(); + .cluster(Collections.singleton("all"), Collections.emptyList()).build(); assertThat(limitedByRole.checkClusterAction("cluster:admin/xpack/security/x", mock(TransportRequest.class)), is(true)); assertThat(limitedByRole.checkClusterAction("cluster:other-action", mock(TransportRequest.class)), is(true)); Role role = LimitedRole.createLimitedRole(fromRole, limitedByRole); @@ -142,7 +141,7 @@ public class LimitedRoleTests extends ESTestCase { } { Role limitedByRole = Role.builder("limited-role") - .cluster(Collections.singleton(ClusterPrivilegeName.MONITOR), Collections.emptyList()).build(); + .cluster(Collections.singleton("monitor"), Collections.emptyList()).build(); assertThat(limitedByRole.checkClusterAction("cluster:monitor/me", mock(TransportRequest.class)), is(true)); Role role = LimitedRole.createLimitedRole(fromRole, limitedByRole); assertThat(role.checkClusterAction("cluster:monitor/me", mock(TransportRequest.class)), is(false)); @@ -199,14 +198,14 @@ public class LimitedRoleTests extends ESTestCase { } public void testCheckClusterPrivilege() { - Role fromRole = Role.builder("a-role").cluster(Collections.singleton(ClusterPrivilegeName.MANAGE_SECURITY), Collections.emptyList()) + Role fromRole = Role.builder("a-role").cluster(Collections.singleton("manage_security"), Collections.emptyList()) .build(); assertThat(fromRole.grants(ClusterPrivilege.ALL), is(false)); assertThat(fromRole.grants(ClusterPrivilege.MANAGE_SECURITY), is(true)); { Role limitedByRole = Role.builder("scoped-role") - .cluster(Collections.singleton(ClusterPrivilegeName.ALL), Collections.emptyList()).build(); + .cluster(Collections.singleton("all"), Collections.emptyList()).build(); assertThat(limitedByRole.grants(ClusterPrivilege.ALL), is(true)); assertThat(limitedByRole.grants(ClusterPrivilege.MANAGE_SECURITY), is(true)); Role role = LimitedRole.createLimitedRole(fromRole, limitedByRole); @@ -215,7 +214,7 @@ public class LimitedRoleTests extends ESTestCase { } { Role limitedByRole = Role.builder("scoped-role") - .cluster(Collections.singleton(ClusterPrivilegeName.MONITOR), Collections.emptyList()).build(); + .cluster(Collections.singleton("monitor"), Collections.emptyList()).build(); assertThat(limitedByRole.grants(ClusterPrivilege.ALL), is(false)); assertThat(limitedByRole.grants(ClusterPrivilege.MONITOR), is(true)); Role role = LimitedRole.createLimitedRole(fromRole, limitedByRole); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 2278f27a6df..ba2ba455a50 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.get.GetAction; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.security.user.privileges.Role.ClusterPrivilegeName; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -1049,9 +1048,9 @@ public class CompositeRolesStoreTests extends ESTestCase { doAnswer(invocationOnMock -> { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(new ApiKeyRoleDescriptors("keyId", - Collections.singletonList(new RoleDescriptor("a-role", new String[] { ClusterPrivilegeName.ALL }, null, null)), + Collections.singletonList(new RoleDescriptor("a-role", new String[] {"all"}, null, null)), Collections.singletonList( - new RoleDescriptor("scoped-role", new String[] { ClusterPrivilegeName.MANAGE_SECURITY }, null, null)))); + new RoleDescriptor("scoped-role", new String[] {"manage_security"}, null, null)))); return Void.TYPE; }).when(apiKeyService).getRoleForApiKey(eq(authentication), any(ActionListener.class)); diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index ba9e1ae2ff9..a29cbe00ffd 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -14,6 +14,7 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit + testCompile ("org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}") } Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> From e611334b2bbf73e04ba63a1e5304cc7db02e0a28 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 10 Apr 2019 11:32:53 -0400 Subject: [PATCH 39/60] Add 7.0.1 version constant --- server/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 8a41b49e03d..09578c3b0f6 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -130,6 +130,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_7_2 = new Version(V_6_7_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_0_1_ID = 7000199; + public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_0_ID = 7010099; public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public 
static final Version CURRENT = V_7_1_0;
@@ -148,6 +150,8 @@ public class Version implements Comparable, ToXContentFragment {
         switch (id) {
             case V_7_1_0_ID:
                 return V_7_1_0;
+            case V_7_0_1_ID:
+                return V_7_0_1;
             case V_7_0_0_ID:
                 return V_7_0_0;
             case V_6_7_1_ID:

From c37b127a073cfcd39bcff49c3156cc3be932c62b Mon Sep 17 00:00:00 2001
From: Hendrik Muhs
Date: Wed, 10 Apr 2019 18:43:41 +0200
Subject: [PATCH 40/60] Fix a timing issue: isFinished is used for a busy loop
 in testing (#41055)

Test: ensure state is persisted before isFinished is changed.

Fixes #41046
---
 .../xpack/rollup/job/RollupIndexerStateTests.java | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
index 23838a59873..49baeb4fade 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
@@ -256,7 +256,6 @@ public class RollupIndexerStateTests extends ESTestCase {
         }
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41046")
     public void testIndexing() throws Exception {
         RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
         AtomicReference state = new AtomicReference<>(IndexerState.STOPPED);
@@ -267,8 +266,8 @@
             @Override
             protected void onFinish(ActionListener listener) {
                 super.onFinish(ActionListener.wrap(r -> {
-                    isFinished.set(true);
                     listener.onResponse(r);
+                    isFinished.set(true);
                 }, listener::onFailure));
             }
         };
@@ -316,8 +315,8 @@
             @Override
             protected void onFinish(ActionListener listener) {
                 super.onFinish(ActionListener.wrap(r -> {
-                    isFinished.set(true);
                     listener.onResponse(r);
+                    isFinished.set(true);
                 }, listener::onFailure));
             }
         };

From 884c3fd7caaf38b33fe4458ba62c7d3f9247287c Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Wed, 10 Apr 2019 10:43:01 -0700
Subject: [PATCH 41/60] Use separate tasks per bwc artifact (#40703) (#40826)

This commit changes the bwc builds from a single task for a branch to a
task for each bwc artifact. This reduces the bwc build time when only a
specific artifact is needed; for example, when running cluster restart
tests on a Mac, the Windows artifacts and rpm/deb packages are not
needed.
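
As a rough sketch of the new usage (the per-version bwc subproject name
below is a placeholder, since the exact project paths are not shown in
this change), a check that only consumes one artifact can now build just
that artifact instead of all of them:

```sh
# Task names are derived from the distribution project names, e.g.
# "linux-tar" -> buildBwcLinuxTar and "oss-linux-tar" -> buildBwcOssLinuxTar.
# "<bwc-project>" stands in for the per-version bwc subproject.
./gradlew :distribution:bwc:<bwc-project>:buildBwcLinuxTar

# The aggregate buildBwc task depends on every per-artifact task and
# corresponds to the behaviour of the old buildBwcVersion task:
./gradlew :distribution:bwc:<bwc-project>:buildBwc
```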
--- distribution/bwc/build.gradle | 115 ++++++++++++++++++---------------- 1 file changed, 62 insertions(+), 53 deletions(-) diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 2a581cf1480..8285d8dae2b 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -120,46 +120,8 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased } } - Map artifactFiles = [:] - List projectDirs = [] - List projects = ['deb', 'rpm'] - if (bwcVersion.onOrAfter('7.0.0')) { - projects.addAll(['windows-zip', 'darwin-tar', 'linux-tar']) - } else { - projects.add('zip') - } - - for (String projectName : projects) { - String baseDir = "distribution" - String classifier = "" - String extension = projectName - if (bwcVersion.onOrAfter('7.0.0') && (projectName.contains('zip') || projectName.contains('tar'))) { - int index = projectName.indexOf('-') - classifier = "-${projectName.substring(0, index)}-x86_64" - extension = projectName.substring(index + 1) - if (extension.equals('tar')) { - extension += '.gz' - } - } - if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('deb')) { - classifier = "-amd64" - } - if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('rpm')) { - classifier = "-x86_64" - } - if (bwcVersion.onOrAfter('6.3.0')) { - baseDir += projectName.endsWith('zip') || projectName.endsWith('tar') ? '/archives' : '/packages' - // add oss variant first - projectDirs.add("${baseDir}/oss-${projectName}") - artifactFiles.put("oss-" + projectName, file("${checkoutDir}/${baseDir}/oss-${projectName}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT${classifier}.${extension}")) - } - projectDirs.add("${baseDir}/${projectName}") - artifactFiles.put(projectName, - file("${checkoutDir}/${baseDir}/${projectName}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT${classifier}.${extension}")) - } - Closure createRunBwcGradleTask = { name, extraConfig -> - task "$name"(type: Exec) { + return tasks.create(name: "$name", type: Exec) { dependsOn checkoutBwcBranch, writeBuildMetadata workingDir = checkoutDir doFirst { @@ -217,28 +179,75 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased } } - createRunBwcGradleTask("buildBwcVersion") { - for (String dir : projectDirs) { - args ":${dir.replace('/', ':')}:assemble" - } - doLast { - List missing = artifactFiles.values().grep { file -> - false == file.exists() - } - if (false == missing.empty) { - throw new InvalidUserDataException("Building ${bwcVersion} didn't generate expected files ${missing}") - } - } + Closure buildBwcTaskName = { projectName -> + return "buildBwc${projectName.replaceAll(/-\w/){ it[1].toUpperCase() }.capitalize()}" } + task buildBwc {} + + Closure createBuildBwcTask = { projectName, projectDir, projectArtifact -> + Task bwcTask = createRunBwcGradleTask(buildBwcTaskName(projectName)) { + args ":${projectDir.replace('/', ':')}:assemble" + doLast { + if (projectArtifact.exists() == false) { + throw new InvalidUserDataException("Building ${bwcVersion} didn't generate expected file ${projectArtifact}") + } + } + } + buildBwc.dependsOn bwcTask + } + + Map artifactFiles = [:] + List projectDirs = [] + List projects = ['deb', 'rpm'] + if (bwcVersion.onOrAfter('7.0.0')) { + projects.addAll(['windows-zip', 'darwin-tar', 'linux-tar']) + } else { + projects.add('zip') + } + + for (String projectName : projects) { + String baseDir = "distribution" + String classifier = "" + String extension = projectName + if 
(bwcVersion.onOrAfter('7.0.0') && (projectName.contains('zip') || projectName.contains('tar'))) { + int index = projectName.indexOf('-') + classifier = "-${projectName.substring(0, index)}-x86_64" + extension = projectName.substring(index + 1) + if (extension.equals('tar')) { + extension += '.gz' + } + } + if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('deb')) { + classifier = "-amd64" + } + if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('rpm')) { + classifier = "-x86_64" + } + if (bwcVersion.onOrAfter('6.3.0')) { + baseDir += projectName.endsWith('zip') || projectName.endsWith('tar') ? '/archives' : '/packages' + // add oss variant first + projectDirs.add("${baseDir}/oss-${projectName}") + File ossProjectArtifact = file("${checkoutDir}/${baseDir}/oss-${projectName}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT${classifier}.${extension}") + artifactFiles.put("oss-" + projectName, ossProjectArtifact) + createBuildBwcTask("oss-${projectName}", "${baseDir}/oss-${projectName}", ossProjectArtifact) + } + projectDirs.add("${baseDir}/${projectName}") + File projectArtifact = file("${checkoutDir}/${baseDir}/${projectName}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT${classifier}.${extension}") + artifactFiles.put(projectName, projectArtifact) + + createBuildBwcTask(projectName, "${baseDir}/${projectName}", projectArtifact) + } + + createRunBwcGradleTask("resolveAllBwcDependencies") { args 'resolveAllDependencies' } - resolveAllDependencies.dependsOn resolveAllBwcDependencies for (e in artifactFiles) { String projectName = e.key + String buildBwcTask = buildBwcTaskName(projectName) File artifactFile = e.value String artifactFileName = artifactFile.name String artifactName = artifactFileName.contains('oss') ? 'elasticsearch-oss' : 'elasticsearch' @@ -251,7 +260,7 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased } configurations.create(projectName) artifacts { - it.add(projectName, [file: artifactFile, name: artifactName, classifier: classifier, type: suffix, builtBy: buildBwcVersion]) + it.add(projectName, [file: artifactFile, name: artifactName, classifier: classifier, type: suffix, builtBy: buildBwcTask]) } } // make sure no dependencies were added to assemble; we want it to be a no-op From 24446ceae0932eb01f3784bd3842c859103e0b3c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 10 Apr 2019 13:38:44 -0400 Subject: [PATCH 42/60] Add packaging to cluster stats response (#41048) This commit adds a packaging_types field to the cluster stats response that outlines the build flavors and types present in a cluster. --- docs/reference/cluster/stats.asciidoc | 11 +++++- .../test/cluster.stats/10_basic.yml | 12 ++++++ .../cluster/stats/ClusterStatsNodes.java | 39 +++++++++++++++++++ .../ClusterStatsMonitoringDocTests.java | 15 ++++++- 4 files changed, 74 insertions(+), 3 deletions(-) diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 38028d8cf10..791fc2414f3 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -195,13 +195,19 @@ Will return, for example: }, "discovery_types": { ... - } + }, + "packaging_types": [ + { + ... 
+ } + ] } } -------------------------------------------------- // TESTRESPONSE[s/"plugins": \[[^\]]*\]/"plugins": $body.$_path/] // TESTRESPONSE[s/"network_types": \{[^\}]*\}/"network_types": $body.$_path/] // TESTRESPONSE[s/"discovery_types": \{[^\}]*\}/"discovery_types": $body.$_path/] +// TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/] // TESTRESPONSE[s/: true|false/: $body.$_path/] // TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/] // TESTRESPONSE[s/: "[^"]*"/: $body.$_path/] @@ -209,7 +215,8 @@ Will return, for example: // 1. Ignore the contents of the `plugins` object because we don't know all of // the plugins that will be in it. And because we figure folks don't need to // see an exhaustive list anyway. -// 2. Similarly, ignore the contents of `network_types` and `discovery_types`. +// 2. Similarly, ignore the contents of `network_types`, `discovery_types`, and +// `packaging_types`. // 3. All of the numbers and strings on the right hand side of *every* field in // the response are ignored. So we're really only asserting things about the // the shape of this response, not the values in it. diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml index bb57709e147..3b7892e0c03 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml @@ -77,3 +77,15 @@ cluster.stats: {} - is_true: nodes.discovery_types + +--- +"get cluster stats returns packaging types": + + - skip: + version: " - 7.0.99" + reason: "packaging types are added for v7.1.0" + + - do: + cluster.stats: {} + + - is_true: nodes.packaging_types diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index 3b3fa480326..5f36c4ebf59 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -61,6 +62,7 @@ public class ClusterStatsNodes implements ToXContentFragment { private final Set plugins; private final NetworkTypes networkTypes; private final DiscoveryTypes discoveryTypes; + private final PackagingTypes packagingTypes; ClusterStatsNodes(List nodeResponses) { this.versions = new HashSet<>(); @@ -93,6 +95,7 @@ public class ClusterStatsNodes implements ToXContentFragment { this.jvm = new JvmStats(nodeInfos, nodeStats); this.networkTypes = new NetworkTypes(nodeInfos); this.discoveryTypes = new DiscoveryTypes(nodeInfos); + this.packagingTypes = new PackagingTypes(nodeInfos); } public Counts getCounts() { @@ -172,6 +175,8 @@ public class ClusterStatsNodes implements ToXContentFragment { builder.endObject(); discoveryTypes.toXContent(builder, params); + + packagingTypes.toXContent(builder, params); return builder; } @@ -650,4 +655,38 @@ public 
class ClusterStatsNodes implements ToXContentFragment { } } + static class PackagingTypes implements ToXContentFragment { + + private final Map, AtomicInteger> packagingTypes; + + PackagingTypes(final List nodeInfos) { + final Map, AtomicInteger> packagingTypes = new HashMap<>(); + for (final NodeInfo nodeInfo : nodeInfos) { + final String flavor = nodeInfo.getBuild().flavor().displayName(); + final String type = nodeInfo.getBuild().type().displayName(); + packagingTypes.computeIfAbsent(Tuple.tuple(flavor, type), k -> new AtomicInteger()).incrementAndGet(); + } + this.packagingTypes = Collections.unmodifiableMap(packagingTypes); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startArray("packaging_types"); + { + for (final Map.Entry, AtomicInteger> entry : packagingTypes.entrySet()) { + builder.startObject(); + { + builder.field("flavor", entry.getKey().v1()); + builder.field("type", entry.getKey().v2()); + builder.field("count", entry.getValue().get()); + } + builder.endObject(); + } + } + builder.endArray(); + return builder; + } + + } + } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index e436484bceb..a8fc173551d 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.monitoring.collector.cluster; +import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; @@ -268,6 +269,11 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase Date: Wed, 10 Apr 2019 14:35:24 -0400 Subject: [PATCH 43/60] [DOCS] Restructure `ids` to new query docs format (#41077) --- docs/reference/query-dsl/ids-query.asciidoc | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/docs/reference/query-dsl/ids-query.asciidoc b/docs/reference/query-dsl/ids-query.asciidoc index 8798a2fb093..6056c24702c 100644 --- a/docs/reference/query-dsl/ids-query.asciidoc +++ b/docs/reference/query-dsl/ids-query.asciidoc @@ -1,8 +1,9 @@ [[query-dsl-ids-query]] === Ids Query +Returns documents based on their IDs. This query uses document IDs stored in +the <> field. -Filters documents that only have the provided ids. Note, this query -uses the <> field. +==== Example request [source,js] -------------------------------------------------- @@ -16,3 +17,11 @@ GET /_search } -------------------------------------------------- // CONSOLE + +==== Top-level parameters for `ids` + +[cols="v,v",options="header"] +|====== +|Parameter |Description +|`values` |An array of <>. 
+|======
\ No newline at end of file

From 999462f46082c2ab851d5ea91c42e63622299e20 Mon Sep 17 00:00:00 2001
From: James Rodewig
Date: Wed, 10 Apr 2019 14:46:51 -0400
Subject: [PATCH 44/60] [DOCS] Document limits for JSON objects with
 `ignore_malformed` mapping setting (#40976)

---
 .../mapping/params/ignore-malformed.asciidoc | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/docs/reference/mapping/params/ignore-malformed.asciidoc b/docs/reference/mapping/params/ignore-malformed.asciidoc
index 30aa6c4e8bc..f8f9a4591f9 100644
--- a/docs/reference/mapping/params/ignore-malformed.asciidoc
+++ b/docs/reference/mapping/params/ignore-malformed.asciidoc
@@ -91,3 +91,16 @@
 become meaningless. Elasticsearch makes it easy to check how many documents
 have malformed fields by using `exist` or `term` queries on the special
 <> field.
+==== Limits for JSON Objects
+You can't use `ignore_malformed` with the following datatypes:
+
+* <>
+* <>
+* <>
+
+You also can't use `ignore_malformed` to ignore JSON objects submitted to fields
+of the wrong datatype. A JSON object is any data surrounded by curly brackets
+`"{}"` and includes data mapped to the nested, object, and range datatypes.
+
+If you submit a JSON object to an unsupported field, {es} will return an error
+and reject the entire document regardless of the `ignore_malformed` setting.
\ No newline at end of file

From 736c7285d4ca8bef1afe684924ca062e22c823bd Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Thu, 11 Apr 2019 11:29:51 +0300
Subject: [PATCH 45/60] [ML] Fix scroll size comparison in DatafeedUpdate.isNoop
 (#41056) (#41079)

Note this does not affect users as the method is only used in tests.
---
 .../elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java
index 78b4e4ec7c2..ccbb5161972 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java
@@ -381,7 +381,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
         && (queryDelay == null || Objects.equals(queryDelay, datafeed.getQueryDelay()))
         && (indices == null || Objects.equals(indices, datafeed.getIndices()))
         && (queryProvider == null || Objects.equals(queryProvider.getQuery(), datafeed.getQuery()))
-        && (scrollSize == null || Objects.equals(scrollSize, datafeed.getQueryDelay()))
+        && (scrollSize == null || Objects.equals(scrollSize, datafeed.getScrollSize()))
         && (aggProvider == null || Objects.equals(aggProvider.getAggs(), datafeed.getAggregations()))
         && (scriptFields == null || Objects.equals(scriptFields, datafeed.getScriptFields()))
         && (delayedDataCheckConfig == null || Objects.equals(delayedDataCheckConfig, datafeed.getDelayedDataCheckConfig()))

From b967a97f8eb770217f7f2087678387267c1717bd Mon Sep 17 00:00:00 2001
From: Henning Andersen <33268011+henningandersen@users.noreply.github.com>
Date: Thu, 11 Apr 2019 12:09:53 +0200
Subject: [PATCH 46/60] Reindex from remote deprecation warning (#41005)

If a reindex from remote request contains an index name that is URL
escaped, we now issue a deprecation warning so that we can drop support
for this in 8.0.
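
As a sketch of the affected usage (host and index names below are made
up for illustration), a request like the following now draws the
deprecation warning; the source index should instead be given
unescaped:

```sh
# Reindex from a remote cluster with a URL-escaped source index name
# ("my%2Dindex" is the escaped form of "my-index"); such a request now
# triggers the deprecation warning.
curl -X POST "localhost:9200/_reindex" -H 'Content-Type: application/json' -d'
{
  "source": {
    "remote": { "host": "http://otherhost:9200" },
    "index": "my%2Dindex"
  },
  "dest": { "index": "my-index-copy" }
}'
```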
--- .../index/reindex/remote/RemoteRequestBuilders.java | 10 ++++++++++ .../reindex/remote/RemoteRequestBuildersTests.java | 3 +++ 2 files changed, 13 insertions(+) diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index 2423de5fd70..7a6eeb70920 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -21,12 +21,14 @@ package org.elasticsearch.index.reindex.remote; import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NStringEntity; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.Request; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -53,6 +55,13 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; * because the version constants have been removed. */ final class RemoteRequestBuilders { + private static final DeprecationLogger DEPRECATION_LOGGER + = new DeprecationLogger(LogManager.getLogger(RemoteRequestBuilders.class)); + + static final String DEPRECATED_URL_ENCODED_INDEX_WARNING = + "Specifying index name using URL escaped index names for reindex from remote is deprecated. 
" + + "Instead specify index name without URL escaping."; + private RemoteRequestBuilders() {} static Request initialSearch(SearchRequest searchRequest, BytesReference query, Version remoteVersion) { @@ -173,6 +182,7 @@ final class RemoteRequestBuilders { private static String encodeIndex(String s) { if (s.contains("%")) { // already encoded, pass-through to allow this in mixed version clusters checkIndexOrType("Index", s); + DEPRECATION_LOGGER.deprecated(DEPRECATED_URL_ENCODED_INDEX_WARNING); return s; } try { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index eb6192a0431..563bbce0008 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -37,6 +37,7 @@ import java.nio.charset.StandardCharsets; import java.util.Map; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.DEPRECATED_URL_ENCODED_INDEX_WARNING; import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.clearScroll; import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearch; import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scroll; @@ -83,6 +84,8 @@ public class RemoteRequestBuildersTests extends ESTestCase { searchRequest.indices("%2f", "%3a"); assertEquals("/%2f,%3a/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertWarnings(DEPRECATED_URL_ENCODED_INDEX_WARNING); + // do not allow , and / if already escaped. 
searchRequest.indices("%2fcat,"); expectBadStartRequest(searchRequest, "Index", ",", "%2fcat,"); From a6dc20a09dd8f12b05dca680cff986622c15134f Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 11 Apr 2019 03:38:12 -0700 Subject: [PATCH 47/60] Create heap dump on OOME (#41031) --- gradle.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gradle.properties b/gradle.properties index 5540bbe9022..ec79845d44e 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,3 +1,3 @@ org.gradle.daemon=true -org.gradle.jvmargs=-Xmx2g +org.gradle.jvmargs=-Xmx2g -XX:+HeapDumpOnOutOfMemoryError options.forkOptions.memoryMaximumSize=2g From 9cfe19413130cd75366f926f849d2dc658ad3dd2 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 11 Apr 2019 13:47:14 +0200 Subject: [PATCH 48/60] Disable TestingConventions Check in Reindex Module (#41100) (#41110) * This is currently failing with: ``` Test classes are not included in any enabled task (:modules:reindex:test): * org.elasticsearch.client.documentation.ReindexDocumentationIT * org.elasticsearch.index.reindex.ManyDocumentsIT * org.elasticsearch.index.reindex.ReindexClientYamlTestSuiteIT * org.elasticsearch.index.reindex.ReindexWithoutContentIT * org.elasticsearch.index.reindex.remote.ReindexFromOldRemoteIT ``` * Same fix as in #38546 --- modules/reindex/build.gradle | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 400bb3d4841..48888d1bfce 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -97,6 +97,7 @@ dependencies { // Issue tracked in https://github.com/elastic/elasticsearch/issues/40904 if (project.inFipsJvm) { + testingConventions.enabled = false integTest.enabled = false } From 93d6766afe948b1ce85e246169b46c4596b44ac7 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Thu, 11 Apr 2019 08:48:42 -0400 Subject: [PATCH 49/60] [DOCS] Improve docs for 'elasticsearch-keystore add-file' command (#41084) --- docs/plugins/repository-gcs.asciidoc | 2 +- docs/reference/setup/secure-settings.asciidoc | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index 9985a89a155..436f9cc9aae 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -152,7 +152,7 @@ Some settings are sensitive and must be stored in the [source,sh] ---- -bin/elasticsearch-keystore add-file gcs.client.default.credentials_file +bin/elasticsearch-keystore add-file gcs.client.default.credentials_file /path/service-account.json ---- The following are the available client settings. Those that must be stored in the keystore diff --git a/docs/reference/setup/secure-settings.asciidoc b/docs/reference/setup/secure-settings.asciidoc index 6abf5dea14d..4e3799db75b 100644 --- a/docs/reference/setup/secure-settings.asciidoc +++ b/docs/reference/setup/secure-settings.asciidoc @@ -64,6 +64,18 @@ through stdin, use the `--stdin` flag: cat /file/containing/setting/value | bin/elasticsearch-keystore add --stdin the.setting.name.to.set ---------------------------------------------------------------- +[float] +[[add-file-to-keystore]] +=== Adding file settings +You can add sensitive files, like authentication key files for cloud plugins, +using the `add-file` command. Be sure to include your file path as an argument +after the setting name. 
+ +[source,sh] +---------------------------------------------------------------- +bin/elasticsearch-keystore add-file the.setting.name.to.set /path/example-file.json +---------------------------------------------------------------- + [float] [[remove-settings]] === Removing settings From 233df6b73b2ce5a063717fda8dc8a5f664e02247 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 11 Apr 2019 16:01:52 +0200 Subject: [PATCH 50/60] Make Transport Shard Bulk Action Async (#39793) (#41112) This is a dependency of #39504 Motivation: By refactoring `TransportShardBulkAction#shardOperationOnPrimary` to async, we enable using `DeterministicTaskQueue` based tests to run indexing operations. This was previously impossible since we were blocking on the `write` thread until the `update` thread finished the mapping update. With this change, the mapping update will trigger a new task in the `write` queue instead. This change significantly enhances the amount of coverage we get from `SnapshotResiliencyTests` (and other potential future tests) when it comes to tracking down concurrency issues with distributed state machines. The logical change is effectively all in `TransportShardBulkAction`, the rest of the changes is then simply mechanically moving the caller code and tests to being async and passing the `ActionListener` down. Since the move to async would've added more parameters to the `private static` steps in this logic, I decided to inline and dry up (between delete and update) the logic as much as I could instead of passing the listener + wait-consumer down through all of them. --- ...TransportVerifyShardBeforeCloseAction.java | 12 +- .../flush/TransportShardFlushAction.java | 13 +- .../refresh/TransportShardRefreshAction.java | 13 +- .../action/bulk/MappingUpdatePerformer.java | 3 +- .../action/bulk/TransportShardBulkAction.java | 307 +++++++++--------- .../TransportResyncReplicationAction.java | 10 +- .../replication/ReplicationOperation.java | 19 +- .../TransportReplicationAction.java | 97 +++--- .../replication/TransportWriteAction.java | 8 +- .../action/index/MappingUpdatedAction.java | 45 +-- .../common/util/concurrent/FutureUtils.java | 16 +- .../seqno/GlobalCheckpointSyncAction.java | 10 +- .../RetentionLeaseBackgroundSyncAction.java | 16 +- .../index/seqno/RetentionLeaseSyncAction.java | 17 +- .../elasticsearch/indices/IndicesModule.java | 4 +- ...portVerifyShardBeforeCloseActionTests.java | 31 +- .../admin/indices/create/SplitIndexIT.java | 1 - .../action/bulk/BulkRejectionIT.java | 74 +++++ .../bulk/TransportShardBulkActionTests.java | 161 +++++---- .../ReplicationOperationTests.java | 15 +- .../TransportReplicationActionTests.java | 20 +- ...ReplicationAllPermitsAcquisitionTests.java | 15 +- .../TransportWriteActionTests.java | 99 +++--- .../GlobalCheckpointSyncActionTests.java | 3 +- ...tentionLeaseBackgroundSyncActionTests.java | 23 +- .../seqno/RetentionLeaseSyncActionTests.java | 24 +- .../action/support/ActionTestUtils.java | 8 + .../ESIndexLevelReplicationTestCase.java | 108 +++--- .../TransportBulkShardOperationsAction.java | 8 +- .../ShardFollowTaskReplicationTests.java | 28 +- 30 files changed, 691 insertions(+), 517 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index 
e0cddcb0acf..f29bf6987a0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -85,14 +85,16 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA } @Override - protected PrimaryResult shardOperationOnPrimary(final ShardRequest shardRequest, - final IndexShard primary) throws Exception { - executeShardOperation(shardRequest, primary); - return new PrimaryResult<>(shardRequest, new ReplicationResponse()); + protected void shardOperationOnPrimary(final ShardRequest shardRequest, final IndexShard primary, + ActionListener> listener) { + ActionListener.completeWith(listener, () -> { + executeShardOperation(shardRequest, primary); + return new PrimaryResult<>(shardRequest, new ReplicationResponse()); + }); } @Override - protected ReplicaResult shardOperationOnReplica(final ShardRequest shardRequest, final IndexShard replica) throws Exception { + protected ReplicaResult shardOperationOnReplica(final ShardRequest shardRequest, final IndexShard replica) { executeShardOperation(shardRequest, replica); return new ReplicaResult(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 63424844d7d..a07dee9613a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; @@ -51,11 +52,13 @@ public class TransportShardFlushAction } @Override - protected PrimaryResult shardOperationOnPrimary(ShardFlushRequest shardRequest, - IndexShard primary) { - primary.flush(shardRequest.getRequest()); - logger.trace("{} flush request executed on primary", primary.shardId()); - return new PrimaryResult<>(shardRequest, new ReplicationResponse()); + protected void shardOperationOnPrimary(ShardFlushRequest shardRequest, IndexShard primary, + ActionListener> listener) { + ActionListener.completeWith(listener, () -> { + primary.flush(shardRequest.getRequest()); + logger.trace("{} flush request executed on primary", primary.shardId()); + return new PrimaryResult<>(shardRequest, new ReplicationResponse()); + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index df3ff16ff88..c0a52ac8c0d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.refresh; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import 
org.elasticsearch.action.support.replication.ReplicationResponse; @@ -53,11 +54,13 @@ public class TransportShardRefreshAction } @Override - protected PrimaryResult shardOperationOnPrimary( - BasicReplicationRequest shardRequest, IndexShard primary) { - primary.refresh("api"); - logger.trace("{} refresh request executed on primary", primary.shardId()); - return new PrimaryResult<>(shardRequest, new ReplicationResponse()); + protected void shardOperationOnPrimary(BasicReplicationRequest shardRequest, IndexShard primary, + ActionListener> listener) { + ActionListener.completeWith(listener, () -> { + primary.refresh("api"); + logger.trace("{} refresh request executed on primary", primary.shardId()); + return new PrimaryResult<>(shardRequest, new ReplicationResponse()); + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java b/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java index 1f228b0f355..5a38f0f43e0 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.bulk; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.shard.ShardId; @@ -27,6 +28,6 @@ public interface MappingUpdatePerformer { /** * Update the mappings on the master. */ - void updateMappings(Mapping update, ShardId shardId, String type); + void updateMappings(Mapping update, ShardId shardId, String type, ActionListener listener); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 325820532be..372923c9036 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -22,7 +22,10 @@ package org.elasticsearch.action.bulk; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.MessageSupplier; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -30,7 +33,6 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.update.UpdateHelper; @@ -44,8 +46,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.CheckedRunnable; -import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; 
import org.elasticsearch.common.inject.Inject; @@ -57,7 +57,6 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.MapperException; -import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; @@ -69,10 +68,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; -import java.io.IOException; import java.util.Map; +import java.util.concurrent.Executor; import java.util.function.Consumer; -import java.util.function.Function; import java.util.function.LongSupplier; /** Performs shard-level bulk (index, delete or update) operations */ @@ -82,7 +80,6 @@ public class TransportShardBulkAction extends TransportWriteAction shardOperationOnPrimary(BulkShardRequest request, IndexShard primary) - throws Exception { + protected void shardOperationOnPrimary(BulkShardRequest request, IndexShard primary, + ActionListener> listener) { ClusterStateObserver observer = new ClusterStateObserver(clusterService, request.timeout(), logger, threadPool.getThreadContext()); - CheckedRunnable waitForMappingUpdate = () -> { - PlainActionFuture waitingFuture = new PlainActionFuture<>(); - observer.waitForNextChange(new ClusterStateObserver.Listener() { + performOnPrimary(request, primary, updateHelper, threadPool::relativeTimeInMillis, + (update, shardId, type, mappingListener) -> { + assert update != null; + assert shardId != null; + mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), type, update, mappingListener); + }, + mappingUpdateListener -> observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { - waitingFuture.onResponse(null); + mappingUpdateListener.onResponse(null); } @Override public void onClusterServiceClose() { - waitingFuture.onFailure(new NodeClosedException(clusterService.localNode())); + mappingUpdateListener.onFailure(new NodeClosedException(clusterService.localNode())); } @Override public void onTimeout(TimeValue timeout) { - waitingFuture.onFailure( - new MapperException("timed out while waiting for a dynamic mapping update")); + mappingUpdateListener.onFailure(new MapperException("timed out while waiting for a dynamic mapping update")); } - }); - waitingFuture.get(); - }; - return performOnPrimary(request, primary, updateHelper, threadPool::absoluteTimeInMillis, - new ConcreteMappingUpdatePerformer(), waitForMappingUpdate); + }), listener, threadPool + ); } - public static WritePrimaryResult performOnPrimary( + public static void performOnPrimary( BulkShardRequest request, IndexShard primary, UpdateHelper updateHelper, LongSupplier nowInMillisSupplier, MappingUpdatePerformer mappingUpdater, - CheckedRunnable waitForMappingUpdate) throws Exception { - BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(request, primary); - return performOnPrimary(context, updateHelper, nowInMillisSupplier, mappingUpdater, waitForMappingUpdate); + Consumer> waitForMappingUpdate, + ActionListener> listener, + ThreadPool threadPool) { + new ActionRunnable>(listener) { + + private final Executor executor = threadPool.executor(ThreadPool.Names.WRITE); + + private final BulkPrimaryExecutionContext context = new 
BulkPrimaryExecutionContext(request, primary); + + @Override + protected void doRun() throws Exception { + while (context.hasMoreOperationsToExecute()) { + if (executeBulkItemRequest(context, updateHelper, nowInMillisSupplier, mappingUpdater, waitForMappingUpdate, + ActionListener.wrap(v -> executor.execute(this), this::onRejection)) == false) { + // We are waiting for a mapping update on another thread, that will invoke this action again once its done + // so we just break out here. + return; + } + assert context.isInitial(); // either completed and moved to next or reset + } + // We're done, there's no more operations to execute so we resolve the wrapped listener + finishRequest(); + } + + @Override + public void onRejection(Exception e) { + // Fail all operations after a bulk rejection hit an action that waited for a mapping update and finish the request + while (context.hasMoreOperationsToExecute()) { + context.setRequestToExecute(context.getCurrent()); + final DocWriteRequest docWriteRequest = context.getRequestToExecute(); + onComplete( + exceptionToResult( + e, primary, docWriteRequest.opType() == DocWriteRequest.OpType.DELETE, docWriteRequest.version()), + context, null); + } + finishRequest(); + } + + private void finishRequest() { + ActionListener.completeWith(listener, + () -> new WritePrimaryResult<>( + context.getBulkShardRequest(), context.buildShardResponse(), context.getLocationToSync(), null, + context.getPrimary(), logger)); + } + }.run(); } - private static WritePrimaryResult performOnPrimary( - BulkPrimaryExecutionContext context, UpdateHelper updateHelper, LongSupplier nowInMillisSupplier, - MappingUpdatePerformer mappingUpdater, CheckedRunnable waitForMappingUpdate) throws Exception { - - while (context.hasMoreOperationsToExecute()) { - executeBulkItemRequest(context, updateHelper, nowInMillisSupplier, mappingUpdater, waitForMappingUpdate); - assert context.isInitial(); // either completed and moved to next or reset - } - return new WritePrimaryResult<>(context.getBulkShardRequest(), context.buildShardResponse(), context.getLocationToSync(), - null, context.getPrimary(), logger); - } - - /** Executes bulk item requests and handles request execution exceptions */ - static void executeBulkItemRequest(BulkPrimaryExecutionContext context, UpdateHelper updateHelper, LongSupplier nowInMillisSupplier, - MappingUpdatePerformer mappingUpdater, CheckedRunnable waitForMappingUpdate) - throws Exception { + /** + * Executes bulk item requests and handles request execution exceptions. 
+ * @return {@code true} if request completed on this thread and the listener was invoked, {@code false} if the request triggered + * a mapping update that will finish and invoke the listener on a different thread + */ + static boolean executeBulkItemRequest(BulkPrimaryExecutionContext context, UpdateHelper updateHelper, LongSupplier nowInMillisSupplier, + MappingUpdatePerformer mappingUpdater, Consumer> waitForMappingUpdate, + ActionListener itemDoneListener) throws Exception { final DocWriteRequest.OpType opType = context.getCurrent().opType(); final UpdateHelper.Result updateResult; @@ -179,11 +209,12 @@ public class TransportShardBulkAction extends TransportWriteAction() { + @Override + public void onResponse(Void v) { + context.markAsRequiringMappingUpdate(); + waitForMappingUpdate.accept( + ActionListener.runAfter(new ActionListener() { + @Override + public void onResponse(Void v) { + assert context.requiresWaitingForMappingUpdate(); + context.resetForExecutionForRetry(); + } - if (context.requiresWaitingForMappingUpdate()) { - try { - waitForMappingUpdate.run(); - context.resetForExecutionForRetry(); - } catch (Exception e) { - context.failOnMappingUpdate(e); - } - return; + @Override + public void onFailure(Exception e) { + context.failOnMappingUpdate(e); + } + }, () -> itemDoneListener.onResponse(null)) + ); + } + + @Override + public void onFailure(Exception e) { + onComplete(exceptionToResult(e, primary, isDelete, version), context, updateResult); + // Requesting mapping update failed, so we don't have to wait for a cluster state update + assert context.isInitial(); + itemDoneListener.onResponse(null); + } + }); + return false; + } else { + onComplete(result, context, updateResult); } - - assert context.isOperationExecuted(); - - if (opType == DocWriteRequest.OpType.UPDATE && - context.getExecutionResult().isFailed() && - isConflictException(context.getExecutionResult().getFailure().getCause())) { - final UpdateRequest updateRequest = (UpdateRequest) context.getCurrent(); - if (context.getRetryCounter() < updateRequest.retryOnConflict()) { - context.resetForExecutionForRetry(); - return; - } - } - - finalizePrimaryOperationOnCompletion(context, opType, updateResult); + return true; } - private static void finalizePrimaryOperationOnCompletion(BulkPrimaryExecutionContext context, DocWriteRequest.OpType opType, - UpdateHelper.Result updateResult) { - final BulkItemResponse executionResult = context.getExecutionResult(); - if (opType == DocWriteRequest.OpType.UPDATE) { - final UpdateRequest updateRequest = (UpdateRequest) context.getCurrent(); - context.markAsCompleted( - processUpdateResponse(updateRequest, context.getConcreteIndex(), executionResult, updateResult)); - } else if (executionResult.isFailed()) { - final Exception failure = executionResult.getFailure().getCause(); - final DocWriteRequest docWriteRequest = context.getCurrent(); - if (TransportShardBulkAction.isConflictException(failure)) { - logger.trace(() -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - context.getPrimary().shardId(), docWriteRequest.opType().getLowercase(), docWriteRequest), failure); - } else { - logger.debug(() -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - context.getPrimary().shardId(), docWriteRequest.opType().getLowercase(), docWriteRequest), failure); - } + private static Engine.Result exceptionToResult(Exception e, IndexShard primary, boolean isDelete, long version) { + return isDelete ? 
primary.getFailedDeleteResult(e, version) : primary.getFailedIndexResult(e, version); + } - context.markAsCompleted(executionResult); - } else { - context.markAsCompleted(executionResult); + private static void onComplete(Engine.Result r, BulkPrimaryExecutionContext context, UpdateHelper.Result updateResult) { + context.markOperationAsExecuted(r); + final DocWriteRequest docWriteRequest = context.getCurrent(); + final DocWriteRequest.OpType opType = docWriteRequest.opType(); + final boolean isUpdate = opType == DocWriteRequest.OpType.UPDATE; + final BulkItemResponse executionResult = context.getExecutionResult(); + final boolean isFailed = executionResult.isFailed(); + if (isUpdate && isFailed && isConflictException(executionResult.getFailure().getCause()) + && context.getRetryCounter() < ((UpdateRequest) docWriteRequest).retryOnConflict()) { + context.resetForExecutionForRetry(); + return; } + final BulkItemResponse response; + if (isUpdate) { + response = processUpdateResponse((UpdateRequest) docWriteRequest, context.getConcreteIndex(), executionResult, updateResult); + } else { + if (isFailed) { + final Exception failure = executionResult.getFailure().getCause(); + final MessageSupplier messageSupplier = () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + context.getPrimary().shardId(), opType.getLowercase(), docWriteRequest); + if (TransportShardBulkAction.isConflictException(failure)) { + logger.trace(messageSupplier, failure); + } else { + logger.debug(messageSupplier, failure); + } + } + response = executionResult; + } + context.markAsCompleted(response); assert context.isInitial(); } @@ -278,7 +341,6 @@ public class TransportShardBulkAction extends TransportWriteAction - primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse, - request.ifSeqNo(), request.ifPrimaryTerm(), request.getAutoGeneratedTimestamp(), request.isRetry()), - e -> primary.getFailedIndexResult(e, request.version()), - context::markOperationAsExecuted, - mapping -> mappingUpdater.updateMappings(mapping, primary.shardId(), request.type())); - } - - private static void executeDeleteRequestOnPrimary(BulkPrimaryExecutionContext context, - MappingUpdatePerformer mappingUpdater) throws Exception { - final DeleteRequest request = context.getRequestToExecute(); - final IndexShard primary = context.getPrimary(); - executeOnPrimaryWhileHandlingMappingUpdates(context, - () -> primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType(), - request.ifSeqNo(), request.ifPrimaryTerm()), - e -> primary.getFailedDeleteResult(e, request.version()), - context::markOperationAsExecuted, - mapping -> mappingUpdater.updateMappings(mapping, primary.shardId(), request.type())); - } - - private static void executeOnPrimaryWhileHandlingMappingUpdates( - BulkPrimaryExecutionContext context, CheckedSupplier toExecute, - Function exceptionToResult, Consumer onComplete, Consumer mappingUpdater) - throws IOException { - T result = toExecute.get(); - if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { - // try to update the mappings and mark the context as needing to try again. - try { - mappingUpdater.accept(result.getRequiredMappingUpdate()); - context.markAsRequiringMappingUpdate(); - } catch (Exception e) { - // failure to update the mapping should translate to a failure of specific requests. Other requests - // still need to be executed and replicated. 
- onComplete.accept(exceptionToResult.apply(e)); - return; - } - } else { - onComplete.accept(result); - } - } - - class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer { - - @Override - public void updateMappings(final Mapping update, final ShardId shardId, final String type) { - assert update != null; - assert shardId != null; - // can throw timeout exception when updating mappings or ISE for attempting to - // update default mappings which are bubbled up - mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), type, update); - } - } } diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index 89e20953493..bfe32749961 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -85,13 +85,13 @@ public class TransportResyncReplicationAction extends TransportWriteAction shardOperationOnPrimary( - ResyncReplicationRequest request, IndexShard primary) throws Exception { - final ResyncReplicationRequest replicaRequest = performOnPrimary(request, primary); - return new WritePrimaryResult<>(replicaRequest, new ResyncReplicationResponse(), null, null, primary, logger); + protected void shardOperationOnPrimary(ResyncReplicationRequest request, IndexShard primary, + ActionListener> listener) { + ActionListener.completeWith(listener, + () -> new WritePrimaryResult<>(performOnPrimary(request), new ResyncReplicationResponse(), null, null, primary, logger)); } - public static ResyncReplicationRequest performOnPrimary(ResyncReplicationRequest request, IndexShard primary) { + public static ResyncReplicationRequest performOnPrimary(ResyncReplicationRequest request) { return request; } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 22e90cfc135..c8c102dfd85 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -101,14 +101,17 @@ public class ReplicationOperation< totalShards.incrementAndGet(); pendingActions.incrementAndGet(); // increase by 1 until we finish all primary coordination - primaryResult = primary.perform(request); - primary.updateLocalCheckpointForShard(primaryRouting.allocationId().getId(), primary.localCheckpoint()); + primary.perform(request, ActionListener.wrap(this::handlePrimaryResult, resultListener::onFailure)); + } + + private void handlePrimaryResult(final PrimaryResultT primaryResult) { + this.primaryResult = primaryResult; + primary.updateLocalCheckpointForShard(primary.routingEntry().allocationId().getId(), primary.localCheckpoint()); final ReplicaRequest replicaRequest = primaryResult.replicaRequest(); if (replicaRequest != null) { if (logger.isTraceEnabled()) { - logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); + logger.trace("[{}] op [{}] completed on primary for request [{}]", primary.routingEntry().shardId(), opType, request); } - // we have to get the replication group after successfully indexing into the primary in order to honour recovery semantics. 
             // we have to make sure that every operation indexed into the primary after recovery start will also be replicated
             // to the recovery target. If we used an old replication group, we may miss a recovery that has started since then.
@@ -118,14 +121,14 @@
             // This would entail that some shards could learn about a global checkpoint that would be higher than its local checkpoint.
             final long globalCheckpoint = primary.globalCheckpoint();
             // we have to capture the max_seq_no_of_updates after this request was completed on the primary to make sure the value of
-            // max_seq_no_of_updates on replica when this request is executed is at least the value on the primary when it was executed on.
+            // max_seq_no_of_updates on replica when this request is executed is at least the value on the primary when it was executed
+            // on.
             final long maxSeqNoOfUpdatesOrDeletes = primary.maxSeqNoOfUpdatesOrDeletes();
             assert maxSeqNoOfUpdatesOrDeletes != SequenceNumbers.UNASSIGNED_SEQ_NO : "seqno_of_updates still uninitialized";
             final ReplicationGroup replicationGroup = primary.getReplicationGroup();
             markUnavailableShardsAsStale(replicaRequest, replicationGroup);
             performOnReplicas(replicaRequest, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, replicationGroup);
         }
-
         successfulShards.incrementAndGet();  // mark primary as successful
         decPendingAndFinishIfNeeded();
     }
@@ -310,9 +313,9 @@
      * also complete after. Deal with it.
      *
      * @param request the request to perform
-     * @return the request to send to the replicas
+     * @param listener result listener
      */
-    PrimaryResultT perform(RequestT request) throws Exception;
+    void perform(RequestT request, ActionListener<PrimaryResultT> listener);
 
     /**
      * Notifies the primary of a local checkpoint for the given allocation.
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 6df98fbf149..2e3440a9142 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -21,6 +21,7 @@ package org.elasticsearch.action.support.replication;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.store.AlreadyClosedException;
+import org.elasticsearch.Assertions;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
@@ -190,8 +191,8 @@ public abstract class TransportReplicationAction<
      * @param shardRequest the request to the primary shard
      * @param primary      the primary shard to perform the operation on
      */
-    protected abstract PrimaryResult<ReplicaRequest, Response> shardOperationOnPrimary(
-        Request shardRequest, IndexShard primary) throws Exception;
+    protected abstract void shardOperationOnPrimary(Request shardRequest, IndexShard primary,
+                                                    ActionListener<PrimaryResult<ReplicaRequest, Response>> listener);
 
     /**
      * Synchronously execute the specified replica operation.
This is done under a permit from @@ -357,58 +358,54 @@ public abstract class TransportReplicationAction< }); } else { setPhase(replicationTask, "primary"); - final ActionListener listener = createResponseListener(primaryShardReference); createReplicatedOperation(primaryRequest.getRequest(), - ActionListener.wrap(result -> result.respond(listener), listener::onFailure), - primaryShardReference) - .execute(); + ActionListener.wrap(result -> result.respond( + new ActionListener() { + @Override + public void onResponse(Response response) { + if (syncGlobalCheckpointAfterOperation) { + final IndexShard shard = primaryShardReference.indexShard; + try { + shard.maybeSyncGlobalCheckpoint("post-operation"); + } catch (final Exception e) { + // only log non-closed exceptions + if (ExceptionsHelper.unwrap( + e, AlreadyClosedException.class, IndexShardClosedException.class) == null) { + // intentionally swallow, a missed global checkpoint sync should not fail this operation + logger.info( + new ParameterizedMessage( + "{} failed to execute post-operation global checkpoint sync", shard.shardId()), e); + } + } + } + primaryShardReference.close(); // release shard operation lock before responding to caller + setPhase(replicationTask, "finished"); + onCompletionListener.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + handleException(primaryShardReference, e); + } + }), e -> handleException(primaryShardReference, e) + ), primaryShardReference).execute(); } } catch (Exception e) { - Releasables.closeWhileHandlingException(primaryShardReference); // release shard operation lock before responding to caller - onFailure(e); + handleException(primaryShardReference, e); } } + private void handleException(PrimaryShardReference primaryShardReference, Exception e) { + Releasables.closeWhileHandlingException(primaryShardReference); // release shard operation lock before responding to caller + onFailure(e); + } + @Override public void onFailure(Exception e) { setPhase(replicationTask, "finished"); onCompletionListener.onFailure(e); } - private ActionListener createResponseListener(final PrimaryShardReference primaryShardReference) { - return new ActionListener() { - @Override - public void onResponse(Response response) { - if (syncGlobalCheckpointAfterOperation) { - final IndexShard shard = primaryShardReference.indexShard; - try { - shard.maybeSyncGlobalCheckpoint("post-operation"); - } catch (final Exception e) { - // only log non-closed exceptions - if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) == null) { - logger.info( - new ParameterizedMessage( - "{} failed to execute post-operation global checkpoint sync", - shard.shardId()), - e); - // intentionally swallow, a missed global checkpoint sync should not fail this operation - } - } - } - primaryShardReference.close(); // release shard operation lock before responding to caller - setPhase(replicationTask, "finished"); - onCompletionListener.onResponse(response); - } - - @Override - public void onFailure(Exception e) { - primaryShardReference.close(); // release shard operation lock before responding to caller - setPhase(replicationTask, "finished"); - onCompletionListener.onFailure(e); - } - }; - } - protected ReplicationOperation> createReplicatedOperation( Request request, ActionListener> listener, PrimaryShardReference primaryShardReference) { @@ -417,7 +414,7 @@ public abstract class TransportReplicationAction< } } - protected static class PrimaryResult, + public static class 
PrimaryResult<ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
             Response extends ReplicationResponse> implements ReplicationOperation.PrimaryResult<ReplicaRequest> {
         final ReplicaRequest replicaRequest;
@@ -916,11 +913,15 @@ public abstract class TransportReplicationAction<
         }
 
         @Override
-        public PrimaryResult<ReplicaRequest, Response> perform(Request request) throws Exception {
-            PrimaryResult<ReplicaRequest, Response> result = shardOperationOnPrimary(request, indexShard);
-            assert result.replicaRequest() == null || result.finalFailure == null : "a replica request [" + result.replicaRequest()
-                + "] with a primary failure [" + result.finalFailure + "]";
-            return result;
+        public void perform(Request request, ActionListener<PrimaryResult<ReplicaRequest, Response>> listener) {
+            if (Assertions.ENABLED) {
+                listener = ActionListener.map(listener, result -> {
+                    assert result.replicaRequest() == null || result.finalFailure == null : "a replica request [" + result.replicaRequest()
+                        + "] with a primary failure [" + result.finalFailure + "]";
+                    return result;
+                });
+            }
+            shardOperationOnPrimary(request, indexShard, listener);
         }
 
         @Override
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
index cb3f67aa99e..15c49d10303 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
@@ -103,12 +103,12 @@ public abstract class TransportWriteAction<
     /**
      * Called on the primary with a reference to the primary {@linkplain IndexShard} to modify.
      *
-     * @return the result of the operation on primary, including current translog location and operation response and failure
-     *         async refresh is performed on the primary shard according to the Request refresh policy
+     * @param listener listener for the result of the operation on primary, including current translog location and operation response
+     *                 and failure async refresh is performed on the primary shard according to the Request refresh policy
      */
     @Override
-    protected abstract WritePrimaryResult<ReplicaRequest, Response> shardOperationOnPrimary(
-        Request request, IndexShard primary) throws Exception;
+    protected abstract void shardOperationOnPrimary(
+        Request request, IndexShard primary, ActionListener<WritePrimaryResult<ReplicaRequest, Response>> listener);
 
     /**
      * Called once per replica with a reference to the replica {@linkplain IndexShard} to modify.
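Taken together, the changes to ReplicationOperation.Primary#perform, TransportReplicationAction and TransportWriteAction invert the primary-side contract from "return a result" into "complete a listener". As a rough sketch of what a concrete write action now looks like, assuming a trivially synchronous operation (MyRequest, MyResponse and doSynchronousWork are illustrative stand-ins, not part of this patch):

    @Override
    protected void shardOperationOnPrimary(MyRequest request, IndexShard primary,
                                           ActionListener<WritePrimaryResult<MyRequest, MyResponse>> listener) {
        // ActionListener.completeWith runs the supplier inline and routes either the
        // returned result or a thrown exception to the listener, so synchronous
        // implementations keep their old behaviour under the asynchronous contract.
        ActionListener.completeWith(listener, () -> {
            doSynchronousWork(primary); // hypothetical synchronous shard operation
            return new WritePrimaryResult<>(request, new MyResponse(), null, null, primary, logger);
        });
    }

Genuinely asynchronous implementations, such as TransportShardBulkAction above, instead hold on to the listener and complete it from whichever thread finishes the last item.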
diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 14c360168f9..aeba27c4120 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -19,7 +19,9 @@ package org.elasticsearch.cluster.action.index; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; @@ -29,6 +31,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.MapperService; @@ -57,34 +60,36 @@ public class MappingUpdatedAction { this.dynamicMappingUpdateTimeout = dynamicMappingUpdateTimeout; } - public void setClient(Client client) { this.client = client.admin().indices(); } - private PutMappingRequestBuilder updateMappingRequest(Index index, String type, Mapping mappingUpdate, final TimeValue timeout) { - if (type.equals(MapperService.DEFAULT_MAPPING)) { - throw new IllegalArgumentException("_default_ mapping should not be updated"); - } - return client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString(), XContentType.JSON) - .setMasterNodeTimeout(timeout).setTimeout(TimeValue.ZERO); - } - - /** - * Same as {@link #updateMappingOnMaster(Index, String, Mapping, TimeValue)} - * using the default timeout. - */ - public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate) { - updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout); - } - /** * Update mappings on the master node, waiting for the change to be committed, * but not for the mapping update to be applied on all nodes. The timeout specified by * {@code timeout} is the master node timeout ({@link MasterNodeRequest#masterNodeTimeout()}), * potentially waiting for a master node to be available. 
      */
-    public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue masterNodeTimeout) {
-        updateMappingRequest(index, type, mappingUpdate, masterNodeTimeout).get();
+    public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, ActionListener<Void> listener) {
+        if (type.equals(MapperService.DEFAULT_MAPPING)) {
+            throw new IllegalArgumentException("_default_ mapping should not be updated");
+        }
+        client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString(), XContentType.JSON)
+            .setMasterNodeTimeout(dynamicMappingUpdateTimeout).setTimeout(TimeValue.ZERO)
+            .execute(new ActionListener<AcknowledgedResponse>() {
+                @Override
+                public void onResponse(AcknowledgedResponse acknowledgedResponse) {
+                    listener.onResponse(null);
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    listener.onFailure(unwrapException(e));
+                }
+            });
+    }
+
+    private static Exception unwrapException(Exception cause) {
+        return cause instanceof ElasticsearchException ? FutureUtils.unwrapEsException((ElasticsearchException) cause) : cause;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java
index c7345aa3b63..15e26779071 100644
--- a/server/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java
+++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java
@@ -88,17 +88,19 @@ public class FutureUtils {
     public static RuntimeException rethrowExecutionException(ExecutionException e) {
         if (e.getCause() instanceof ElasticsearchException) {
             ElasticsearchException esEx = (ElasticsearchException) e.getCause();
-            Throwable root = esEx.unwrapCause();
-            if (root instanceof ElasticsearchException) {
-                return (ElasticsearchException) root;
-            } else if (root instanceof RuntimeException) {
-                return (RuntimeException) root;
-            }
-            return new UncategorizedExecutionException("Failed execution", root);
+            return unwrapEsException(esEx);
         } else if (e.getCause() instanceof RuntimeException) {
             return (RuntimeException) e.getCause();
         } else {
             return new UncategorizedExecutionException("Failed execution", e);
         }
     }
+
+    public static RuntimeException unwrapEsException(ElasticsearchException esEx) {
+        Throwable root = esEx.unwrapCause();
+        if (root instanceof ElasticsearchException || root instanceof RuntimeException) {
+            return (RuntimeException) root;
+        }
+        return new UncategorizedExecutionException("Failed execution", root);
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java
index b7f08a36ac0..d67cbc833d6 100644
--- a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java
+++ b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java
@@ -102,10 +102,12 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction<
     }
 
     @Override
-    protected PrimaryResult<Request, ReplicationResponse> shardOperationOnPrimary(
-        final Request request, final IndexShard indexShard) throws Exception {
-        maybeSyncTranslog(indexShard);
-        return new PrimaryResult<>(request, new ReplicationResponse());
+    protected void shardOperationOnPrimary(Request request, IndexShard indexShard,
+                                           ActionListener<PrimaryResult<Request, ReplicationResponse>> listener) {
+        ActionListener.completeWith(listener, () -> {
+            maybeSyncTranslog(indexShard);
+            return new PrimaryResult<>(request, new ReplicationResponse());
+        });
     }
 
     @Override
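With the blocking get() gone, callers of MappingUpdatedAction#updateMappingOnMaster chain their follow-up work through the listener instead. A hedged sketch of a call site in the style of the bulk-action changes above (the context variable stands for a BulkPrimaryExecutionContext as used earlier in this patch):

    // Sketch only: request the mapping update and react per item, without
    // blocking a write thread while the master commits the new mapping.
    mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), type, mappingUpdate,
        ActionListener.wrap(
            v -> context.resetForExecutionForRetry(), // mapping accepted: retry the item
            e -> context.failOnMappingUpdate(e)));    // mapping rejected: fail just this item

The unwrapException/FutureUtils.unwrapEsException pair above preserves the exception types that callers previously saw thrown from the blocking future, so downstream error handling does not change.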
diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index 570159cc74d..9be2e550573 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -127,14 +127,16 @@ public class RetentionLeaseBackgroundSyncAction extends TransportReplicationActi } @Override - protected PrimaryResult shardOperationOnPrimary( + protected void shardOperationOnPrimary( final Request request, - final IndexShard primary) throws WriteStateException { - assert request.waitForActiveShards().equals(ActiveShardCount.NONE) : request.waitForActiveShards(); - Objects.requireNonNull(request); - Objects.requireNonNull(primary); - primary.persistRetentionLeases(); - return new PrimaryResult<>(request, new ReplicationResponse()); + final IndexShard primary, ActionListener> listener) { + ActionListener.completeWith(listener, () -> { + assert request.waitForActiveShards().equals(ActiveShardCount.NONE) : request.waitForActiveShards(); + Objects.requireNonNull(request); + Objects.requireNonNull(primary); + primary.persistRetentionLeases(); + return new PrimaryResult<>(request, new ReplicationResponse()); + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index a8aa7fe6f8e..6129affe493 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -123,14 +123,15 @@ public class RetentionLeaseSyncAction extends } @Override - protected WritePrimaryResult shardOperationOnPrimary( - final Request request, - final IndexShard primary) throws WriteStateException { - assert request.waitForActiveShards().equals(ActiveShardCount.NONE) : request.waitForActiveShards(); - Objects.requireNonNull(request); - Objects.requireNonNull(primary); - primary.persistRetentionLeases(); - return new WritePrimaryResult<>(request, new Response(), null, null, primary, getLogger()); + protected void shardOperationOnPrimary(Request request, IndexShard primary, + ActionListener> listener) { + ActionListener.completeWith(listener, () -> { + assert request.waitForActiveShards().equals(ActiveShardCount.NONE) : request.waitForActiveShards(); + Objects.requireNonNull(request); + Objects.requireNonNull(primary); + primary.persistRetentionLeases(); + return new WritePrimaryResult<>(request, new Response(), null, null, primary, getLogger()); + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index 77bddbe2156..bef05ecda9f 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -110,7 +110,7 @@ public class IndicesModule extends AbstractModule { ); } - private Map getMappers(List mapperPlugins) { + public static Map getMappers(List mapperPlugins) { Map mappers = new LinkedHashMap<>(); // builtin mappers @@ -168,7 +168,7 @@ public class IndicesModule extends AbstractModule { return Collections.unmodifiableMap(builtInMetadataMappers); } - private static Map getMetadataMappers(List mapperPlugins) { + public static Map 
getMetadataMappers(List mapperPlugins) { Map metadataMappers = new LinkedHashMap<>(); int i = 0; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index 687b0168070..f90f40311c1 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.close; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; @@ -60,6 +61,7 @@ import org.mockito.ArgumentCaptor; import java.util.Collections; import java.util.List; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; @@ -133,18 +135,28 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase { threadPool = null; } - private void executeOnPrimaryOrReplica() throws Exception { + private void executeOnPrimaryOrReplica() throws Throwable { final TaskId taskId = new TaskId("_node_id", randomNonNegativeLong()); final TransportVerifyShardBeforeCloseAction.ShardRequest request = new TransportVerifyShardBeforeCloseAction.ShardRequest(indexShard.shardId(), clusterBlock, taskId); - if (randomBoolean()) { - assertNotNull(action.shardOperationOnPrimary(request, indexShard)); - } else { - assertNotNull(action.shardOperationOnPrimary(request, indexShard)); + final PlainActionFuture res = PlainActionFuture.newFuture(); + action.shardOperationOnPrimary(request, indexShard, ActionListener.wrap( + r -> { + assertNotNull(r); + res.onResponse(null); + }, + res::onFailure + )); + try { + res.get(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } catch (ExecutionException e) { + throw e.getCause(); } } - public void testShardIsFlushed() throws Exception { + public void testShardIsFlushed() throws Throwable { final ArgumentCaptor flushRequest = ArgumentCaptor.forClass(FlushRequest.class); when(indexShard.flush(flushRequest.capture())).thenReturn(new Engine.CommitId(new byte[0])); @@ -171,7 +183,7 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase { verify(indexShard, times(0)).flush(any(FlushRequest.class)); } - public void testVerifyShardBeforeIndexClosing() throws Exception { + public void testVerifyShardBeforeIndexClosing() throws Throwable { executeOnPrimaryOrReplica(); verify(indexShard, times(1)).verifyShardBeforeIndexClosing(); verify(indexShard, times(1)).flush(any(FlushRequest.class)); @@ -271,8 +283,9 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase { } @Override - public PrimaryResult perform(TransportVerifyShardBeforeCloseAction.ShardRequest request) throws Exception { - return new PrimaryResult(request); + public void perform( + TransportVerifyShardBeforeCloseAction.ShardRequest request, ActionListener listener) { + listener.onResponse(new PrimaryResult(request)); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java 
b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 0cc83cee897..7038505ff6f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -77,7 +77,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; - public class SplitIndexIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java new file mode 100644 index 00000000000..c82d825b942 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collections; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 2) +public class BulkRejectionIT extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("thread_pool.write.size", 1) + .put("thread_pool.write.queue_size", 1) + .build(); + } + + @Override + protected int numberOfReplicas() { + return 1; + } + + protected int numberOfShards() { + return 5; + } + + public void testBulkRejectionAfterDynamicMappingUpdate() throws Exception { + final String index = "test"; + assertAcked(prepareCreate(index)); + ensureGreen(); + final BulkRequest request1 = new BulkRequest(); + for (int i = 0; i < 500; ++i) { + request1.add(new IndexRequest(index).source(Collections.singletonMap("key" + i, "value" + i))) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + } + // Huge request to keep the write pool busy so that requests waiting on a mapping update in the other bulk request get rejected + // by the write pool + final BulkRequest request2 = new BulkRequest(); + for (int i = 0; i < 10_000; ++i) { + request2.add(new IndexRequest(index).source(Collections.singletonMap("key", "valuea" + i))) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + } + final ActionFuture bulkFuture1 = client().bulk(request1); + final ActionFuture bulkFuture2 = client().bulk(request2); + bulkFuture1.actionGet(); + bulkFuture2.actionGet(); 
+ internalCluster().assertSeqNos(); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 610a72de6ec..62217f78731 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -22,13 +22,16 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.bulk.TransportShardBulkAction.ReplicaItemExecutionMode; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.replication.TransportWriteAction.WritePrimaryResult; import org.elasticsearch.action.update.UpdateHelper; @@ -75,6 +78,8 @@ import static org.mockito.Mockito.when; public class TransportShardBulkActionTests extends IndexShardTestCase { + private static final ActionListener ASSERTING_DONE_LISTENER = ActionTestUtils.assertNoFailureListener(r -> {}); + private final ShardId shardId = new ShardId("index", "_na_", 0); private final Settings idxSettings = Settings.builder() .put("index.number_of_shards", 1) @@ -146,10 +151,9 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { randomlySetIgnoredPrimaryResponse(primaryRequest); - UpdateHelper updateHelper = null; BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); - TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, - new NoopMappingUpdatePerformer(), () -> {}); + TransportShardBulkAction.executeBulkItemRequest(context, null, threadPool::absoluteTimeInMillis, + new NoopMappingUpdatePerformer(), listener -> {}, ASSERTING_DONE_LISTENER); assertFalse(context.hasMoreOperationsToExecute()); // Translog should change, since there were no problems @@ -174,8 +178,9 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { randomlySetIgnoredPrimaryResponse(primaryRequest); BulkPrimaryExecutionContext secondContext = new BulkPrimaryExecutionContext(bulkShardRequest, shard); - TransportShardBulkAction.executeBulkItemRequest(secondContext, updateHelper, - threadPool::absoluteTimeInMillis, new ThrowingMappingUpdatePerformer(new RuntimeException("fail")), () -> {}); + TransportShardBulkAction.executeBulkItemRequest(secondContext, null, + threadPool::absoluteTimeInMillis, new ThrowingMappingUpdatePerformer(new RuntimeException("fail")), + listener -> {}, ASSERTING_DONE_LISTENER); assertFalse(context.hasMoreOperationsToExecute()); assertNull(secondContext.getLocationToSync()); @@ -224,37 +229,44 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { final ElasticsearchStatusException rejectionCause = new ElasticsearchStatusException("testing rejection", rejectionStatus); rejectItem.abort("index", rejectionCause); 
- UpdateHelper updateHelper = null; - WritePrimaryResult result = TransportShardBulkAction.performOnPrimary( - bulkShardRequest, shard, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - () -> {}); + final CountDownLatch latch = new CountDownLatch(1); + TransportShardBulkAction.performOnPrimary( + bulkShardRequest, shard, null, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), + listener -> {}, ActionListener.runAfter( + ActionTestUtils.assertNoFailureListener(result -> { + // since at least 1 item passed, the tran log location should exist, + assertThat(((WritePrimaryResult) result).location, notNullValue()); + // and the response should exist and match the item count + assertThat(result.finalResponseIfSuccessful, notNullValue()); + assertThat(result.finalResponseIfSuccessful.getResponses(), arrayWithSize(items.length)); - // since at least 1 item passed, the tran log location should exist, - assertThat(result.location, notNullValue()); - // and the response should exist and match the item count - assertThat(result.finalResponseIfSuccessful, notNullValue()); - assertThat(result.finalResponseIfSuccessful.getResponses(), arrayWithSize(items.length)); + // check each response matches the input item, including the rejection + for (int i = 0; i < items.length; i++) { + BulkItemResponse response = result.finalResponseIfSuccessful.getResponses()[i]; + assertThat(response.getItemId(), equalTo(i)); + assertThat(response.getIndex(), equalTo("index")); + assertThat(response.getType(), equalTo("_doc")); + assertThat(response.getId(), equalTo("id_" + i)); + assertThat(response.getOpType(), equalTo(DocWriteRequest.OpType.INDEX)); + if (response.getItemId() == rejectItem.id()) { + assertTrue(response.isFailed()); + assertThat(response.getFailure().getCause(), equalTo(rejectionCause)); + assertThat(response.status(), equalTo(rejectionStatus)); + } else { + assertFalse(response.isFailed()); + } + } - // check each response matches the input item, including the rejection - for (int i = 0; i < items.length; i++) { - BulkItemResponse response = result.finalResponseIfSuccessful.getResponses()[i]; - assertThat(response.getItemId(), equalTo(i)); - assertThat(response.getIndex(), equalTo("index")); - assertThat(response.getType(), equalTo("_doc")); - assertThat(response.getId(), equalTo("id_" + i)); - assertThat(response.getOpType(), equalTo(DocWriteRequest.OpType.INDEX)); - if (response.getItemId() == rejectItem.id()) { - assertTrue(response.isFailed()); - assertThat(response.getFailure().getCause(), equalTo(rejectionCause)); - assertThat(response.status(), equalTo(rejectionStatus)); - } else { - assertFalse(response.isFailed()); - } - } + // Check that the non-rejected updates made it to the shard + try { + assertDocCount(shard, items.length - 1); + closeShards(shard); + } catch (IOException e) { + throw new AssertionError(e); + } + }), latch::countDown), threadPool); - // Check that the non-rejected updates made it to the shard - assertDocCount(shard, items.length - 1); - closeShards(shard); + latch.await(); } public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { @@ -281,11 +293,12 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); AtomicInteger updateCalled = new AtomicInteger(); TransportShardBulkAction.executeBulkItemRequest(context, null, threadPool::absoluteTimeInMillis, - (update, shardId, type) -> { + (update, 
shardId, type, listener) -> { // There should indeed be a mapping update assertNotNull(update); updateCalled.incrementAndGet(); - }, () -> {}); + listener.onResponse(null); + }, listener -> listener.onResponse(null), ASSERTING_DONE_LISTENER); assertTrue(context.isInitial()); assertTrue(context.hasMoreOperationsToExecute()); @@ -298,7 +311,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { .thenReturn(success); TransportShardBulkAction.executeBulkItemRequest(context, null, threadPool::absoluteTimeInMillis, - (update, shardId, type) -> fail("should not have had to update the mappings"), () -> {}); + (update, shardId, type, listener) -> fail("should not have had to update the mappings"), listener -> {}, + ASSERTING_DONE_LISTENER); // Verify that the shard "executed" the operation only once (1 for previous invocations plus @@ -325,8 +339,6 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { items[0] = new BulkItemRequest(0, writeRequest); BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - UpdateHelper updateHelper = null; - // Return an exception when trying to update the mapping, or when waiting for it to come RuntimeException err = new RuntimeException("some kind of exception"); @@ -335,9 +347,22 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { randomlySetIgnoredPrimaryResponse(items[0]); BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); - TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, + final CountDownLatch latch = new CountDownLatch(1); + TransportShardBulkAction.executeBulkItemRequest( + context, null, threadPool::relativeTimeInMillis, errorOnWait == false ? new ThrowingMappingUpdatePerformer(err) : new NoopMappingUpdatePerformer(), - errorOnWait ? () -> { throw err; } : () -> {}); + errorOnWait ? 
listener -> listener.onFailure(err) : listener -> listener.onResponse(null), + new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(Void aVoid) { + } + + @Override + public void onFailure(final Exception e) { + assertEquals(err, e); + } + }, latch)); + latch.await(); assertFalse(context.hasMoreOperationsToExecute()); // Translog shouldn't be synced, as there were conflicting mappings @@ -371,13 +396,12 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { new BulkShardRequest(shardId, RefreshPolicy.NONE, items); Translog.Location location = new Translog.Location(0, 0, 0); - UpdateHelper updateHelper = null; randomlySetIgnoredPrimaryResponse(items[0]); BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); - TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, - new NoopMappingUpdatePerformer(), () -> {}); + TransportShardBulkAction.executeBulkItemRequest(context, null, threadPool::absoluteTimeInMillis, + new NoopMappingUpdatePerformer(), listener -> {}, ASSERTING_DONE_LISTENER); assertFalse(context.hasMoreOperationsToExecute()); // Translog changes, even though the document didn't exist @@ -418,8 +442,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { randomlySetIgnoredPrimaryResponse(items[0]); context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); - TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, - new NoopMappingUpdatePerformer(), () -> {}); + TransportShardBulkAction.executeBulkItemRequest(context, null, threadPool::absoluteTimeInMillis, + new NoopMappingUpdatePerformer(), listener -> {}, ASSERTING_DONE_LISTENER); assertFalse(context.hasMoreOperationsToExecute()); // Translog changes, because the document was deleted @@ -475,7 +499,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, - new NoopMappingUpdatePerformer(), () -> {}); + new NoopMappingUpdatePerformer(), listener -> {}, ASSERTING_DONE_LISTENER); assertFalse(context.hasMoreOperationsToExecute()); @@ -521,7 +545,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, - new NoopMappingUpdatePerformer(), () -> {}); + new NoopMappingUpdatePerformer(), listener -> {}, ASSERTING_DONE_LISTENER); assertFalse(context.hasMoreOperationsToExecute()); // Since this was not a conflict failure, the primary response @@ -571,7 +595,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, - new NoopMappingUpdatePerformer(), () -> {}); + new NoopMappingUpdatePerformer(), listener -> listener.onResponse(null), ASSERTING_DONE_LISTENER); assertFalse(context.hasMoreOperationsToExecute()); assertNull(context.getLocationToSync()); @@ -618,7 +642,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkPrimaryExecutionContext 
context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, - new NoopMappingUpdatePerformer(), () -> {}); + new NoopMappingUpdatePerformer(), listener -> {}, ASSERTING_DONE_LISTENER); assertFalse(context.hasMoreOperationsToExecute()); // Check that the translog is successfully advanced @@ -664,7 +688,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, - new NoopMappingUpdatePerformer(), () -> {}); + new NoopMappingUpdatePerformer(), listener -> listener.onResponse(null), ASSERTING_DONE_LISTENER); assertFalse(context.hasMoreOperationsToExecute()); // Check that the translog is successfully advanced @@ -698,7 +722,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, - new NoopMappingUpdatePerformer(), () -> {}); + new NoopMappingUpdatePerformer(), listener -> {}, ASSERTING_DONE_LISTENER); assertFalse(context.hasMoreOperationsToExecute()); assertNull(context.getLocationToSync()); @@ -731,7 +755,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); while (context.hasMoreOperationsToExecute()) { TransportShardBulkAction.executeBulkItemRequest(context, null, threadPool::absoluteTimeInMillis, - new NoopMappingUpdatePerformer(), () -> {}); + new NoopMappingUpdatePerformer(), listener -> {}, ASSERTING_DONE_LISTENER); } assertTrue(shard.isSyncNeeded()); @@ -814,18 +838,22 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - WritePrimaryResult result = TransportShardBulkAction.performOnPrimary( + final CountDownLatch latch = new CountDownLatch(1); + TransportShardBulkAction.performOnPrimary( bulkShardRequest, shard, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - () -> {}); - - assertThat(result.location, equalTo(resultLocation)); - BulkItemResponse primaryResponse = result.replicaRequest().items()[0].getPrimaryResponse(); - assertThat(primaryResponse.getItemId(), equalTo(0)); - assertThat(primaryResponse.getId(), equalTo("id")); - assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE)); - DocWriteResponse response = primaryResponse.getResponse(); - assertThat(response.status(), equalTo(RestStatus.CREATED)); - assertThat(response.getSeqNo(), equalTo(13L)); + listener -> listener.onResponse(null), + new LatchedActionListener<>( + ActionTestUtils.assertNoFailureListener(result -> { + assertThat(((WritePrimaryResult) result).location, equalTo(resultLocation)); + BulkItemResponse primaryResponse = result.replicaRequest().items()[0].getPrimaryResponse(); + assertThat(primaryResponse.getItemId(), equalTo(0)); + assertThat(primaryResponse.getId(), equalTo("id")); + assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE)); + DocWriteResponse response = primaryResponse.getResponse(); + assertThat(response.status(), 
equalTo(RestStatus.CREATED)); + assertThat(response.getSeqNo(), equalTo(13L)); + }), latch), threadPool); + latch.await(); } private void randomlySetIgnoredPrimaryResponse(BulkItemRequest primaryRequest) { @@ -875,7 +903,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { /** Doesn't perform any mapping updates */ public static class NoopMappingUpdatePerformer implements MappingUpdatePerformer { @Override - public void updateMappings(Mapping update, ShardId shardId, String type) { + public void updateMappings(Mapping update, ShardId shardId, String type, ActionListener listener) { + listener.onResponse(null); } } @@ -888,8 +917,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { } @Override - public void updateMappings(Mapping update, ShardId shardId, String type) { - throw e; + public void updateMappings(Mapping update, ShardId shardId, String type, ActionListener listener) { + listener.onFailure(e); } } } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 3af5047fe22..d493b703372 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -287,11 +287,12 @@ public class ReplicationOperationTests extends ESTestCase { final ShardRouting primaryShard = updatedReplicationGroup.getRoutingTable().primaryShard(); final TestPrimary primary = new TestPrimary(primaryShard, replicationGroup::get) { @Override - public Result perform(Request request) throws Exception { - Result result = super.perform(request); - replicationGroup.set(updatedReplicationGroup); - logger.debug("--> state after primary operation:\n{}", replicationGroup.get()); - return result; + public void perform(Request request, ActionListener listener) { + super.perform(request, ActionListener.map(listener, result -> { + replicationGroup.set(updatedReplicationGroup); + logger.debug("--> state after primary operation:\n{}", replicationGroup.get()); + return result; + })); } }; @@ -481,11 +482,11 @@ public class ReplicationOperationTests extends ESTestCase { } @Override - public Result perform(Request request) throws Exception { + public void perform(Request request, ActionListener listener) { if (request.processedOnPrimary.compareAndSet(false, true) == false) { fail("processed [" + request + "] twice"); } - return new Result(request); + listener.onResponse(new Result(request)); } static class Result implements ReplicationOperation.PrimaryResult { diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index a663841ac6a..e256ec4e926 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; import 
org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.replication.ReplicationOperation.ReplicaResponse; @@ -691,16 +692,16 @@ public class TransportReplicationActionTests extends ESTestCase { }; TestAction.PrimaryShardReference primary = action.new PrimaryShardReference(shard, releasable); final Request request = new Request(NO_SHARD_ID); - primary.perform(request); + primary.perform(request, ActionTestUtils.assertNoFailureListener(r -> { + final ElasticsearchException exception = new ElasticsearchException("testing"); + primary.failShard("test", exception); - final ElasticsearchException exception = new ElasticsearchException("testing"); - primary.failShard("test", exception); + verify(shard).failShard("test", exception); - verify(shard).failShard("test", exception); + primary.close(); - primary.close(); - - assertTrue(closed.get()); + assertTrue(closed.get()); + })); } public void testReplicaProxy() throws InterruptedException, ExecutionException { @@ -1229,10 +1230,11 @@ public class TransportReplicationActionTests extends ESTestCase { } @Override - protected PrimaryResult shardOperationOnPrimary(Request shardRequest, IndexShard primary) { + protected void shardOperationOnPrimary(Request shardRequest, IndexShard primary, + ActionListener> listener) { boolean executedBefore = shardRequest.processedOnPrimary.getAndSet(true); assert executedBefore == false : "request has already been executed on the primary"; - return new PrimaryResult<>(shardRequest, new TestResponse()); + listener.onResponse(new PrimaryResult<>(shardRequest, new TestResponse())); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java index 15886a517d3..8463d66e98e 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java @@ -439,12 +439,13 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe } @Override - protected PrimaryResult shardOperationOnPrimary(Request shardRequest, IndexShard shard) throws Exception { + protected void shardOperationOnPrimary(Request shardRequest, IndexShard shard, + ActionListener> listener) { executedOnPrimary.set(true); // The TransportReplicationAction.getIndexShard() method is overridden for testing purpose but we double check here // that the permit has been acquired on the primary shard assertSame(primary, shard); - return new PrimaryResult<>(shardRequest, new Response()); + listener.onResponse(new PrimaryResult<>(shardRequest, new Response())); } @Override @@ -499,10 +500,11 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe } @Override - protected PrimaryResult shardOperationOnPrimary(Request shardRequest, IndexShard shard) throws Exception { + protected void shardOperationOnPrimary(Request shardRequest, IndexShard shard, + ActionListener> listener) { assertNoBlocks("block must not exist when executing the operation on primary shard: it should have been blocked before"); assertThat(shard.getActiveOperationsCount(), greaterThan(0)); - return super.shardOperationOnPrimary(shardRequest, shard); + 
super.shardOperationOnPrimary(shardRequest, shard, listener); } @Override @@ -545,9 +547,10 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe } @Override - protected PrimaryResult shardOperationOnPrimary(Request shardRequest, IndexShard shard) throws Exception { + protected void shardOperationOnPrimary(Request shardRequest, IndexShard shard, + ActionListener> listener) { assertEquals("All permits must be acquired", 0, shard.getActiveOperationsCount()); - return super.shardOperationOnPrimary(shardRequest, shard); + super.shardOperationOnPrimary(shardRequest, shard, listener); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 5a35202506d..98c4f215fca 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteResponse; @@ -140,14 +141,15 @@ public class TransportWriteActionTests extends ESTestCase { TestRequest request = new TestRequest(); request.setRefreshPolicy(RefreshPolicy.NONE); // The default, but we'll set it anyway just to be explicit TestAction testAction = new TestAction(); - TransportWriteAction.WritePrimaryResult result = - testAction.shardOperationOnPrimary(request, indexShard); - CapturingActionListener listener = new CapturingActionListener<>(); - result.respond(listener); - assertNotNull(listener.response); - assertNull(listener.failure); - verify(indexShard, never()).refresh(any()); - verify(indexShard, never()).addRefreshListener(any(), any()); + testAction.shardOperationOnPrimary(request, indexShard, + ActionTestUtils.assertNoFailureListener(result -> { + CapturingActionListener listener = new CapturingActionListener<>(); + result.respond(listener); + assertNotNull(listener.response); + assertNull(listener.failure); + verify(indexShard, never()).refresh(any()); + verify(indexShard, never()).addRefreshListener(any(), any()); + })); } public void testReplicaNoRefreshCall() throws Exception { @@ -168,15 +170,16 @@ public class TransportWriteActionTests extends ESTestCase { TestRequest request = new TestRequest(); request.setRefreshPolicy(RefreshPolicy.IMMEDIATE); TestAction testAction = new TestAction(); - TransportWriteAction.WritePrimaryResult result = - testAction.shardOperationOnPrimary(request, indexShard); - CapturingActionListener listener = new CapturingActionListener<>(); - result.respond(listener); - assertNotNull(listener.response); - assertNull(listener.failure); - assertTrue(listener.response.forcedRefresh); - verify(indexShard).refresh("refresh_flag_index"); - verify(indexShard, never()).addRefreshListener(any(), any()); + testAction.shardOperationOnPrimary(request, indexShard, + ActionTestUtils.assertNoFailureListener(result -> { + CapturingActionListener listener = new CapturingActionListener<>(); + result.respond(listener); + 
assertNotNull(listener.response); + assertNull(listener.failure); + assertTrue(listener.response.forcedRefresh); + verify(indexShard).refresh("refresh_flag_index"); + verify(indexShard, never()).addRefreshListener(any(), any()); + })); } public void testReplicaImmediateRefresh() throws Exception { @@ -198,23 +201,24 @@ public class TransportWriteActionTests extends ESTestCase { request.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); TestAction testAction = new TestAction(); - TransportWriteAction.WritePrimaryResult result = - testAction.shardOperationOnPrimary(request, indexShard); - CapturingActionListener listener = new CapturingActionListener<>(); - result.respond(listener); - assertNull(listener.response); // Haven't reallresponded yet + testAction.shardOperationOnPrimary(request, indexShard, + ActionTestUtils.assertNoFailureListener(result -> { + CapturingActionListener listener = new CapturingActionListener<>(); + result.respond(listener); + assertNull(listener.response); // Haven't really responded yet - @SuppressWarnings({ "unchecked", "rawtypes" }) - ArgumentCaptor> refreshListener = ArgumentCaptor.forClass((Class) Consumer.class); - verify(indexShard, never()).refresh(any()); - verify(indexShard).addRefreshListener(any(), refreshListener.capture()); + @SuppressWarnings({"unchecked", "rawtypes"}) + ArgumentCaptor> refreshListener = ArgumentCaptor.forClass((Class) Consumer.class); + verify(indexShard, never()).refresh(any()); + verify(indexShard).addRefreshListener(any(), refreshListener.capture()); - // Now we can fire the listener manually and we'll get a response - boolean forcedRefresh = randomBoolean(); - refreshListener.getValue().accept(forcedRefresh); - assertNotNull(listener.response); - assertNull(listener.failure); - assertEquals(forcedRefresh, listener.response.forcedRefresh); + // Now we can fire the listener manually and we'll get a response + boolean forcedRefresh = randomBoolean(); + refreshListener.getValue().accept(forcedRefresh); + assertNotNull(listener.response); + assertNull(listener.failure); + assertEquals(forcedRefresh, listener.response.forcedRefresh); + })); } public void testReplicaWaitForRefresh() throws Exception { @@ -240,12 +244,13 @@ public class TransportWriteActionTests extends ESTestCase { public void testDocumentFailureInShardOperationOnPrimary() throws Exception { TestRequest request = new TestRequest(); TestAction testAction = new TestAction(true, true); - TransportWriteAction.WritePrimaryResult writePrimaryResult = - testAction.shardOperationOnPrimary(request, indexShard); - CapturingActionListener listener = new CapturingActionListener<>(); - writePrimaryResult.respond(listener); - assertNull(listener.response); - assertNotNull(listener.failure); + testAction.shardOperationOnPrimary(request, indexShard, + ActionTestUtils.assertNoFailureListener(writePrimaryResult -> { + CapturingActionListener listener = new CapturingActionListener<>(); + writePrimaryResult.respond(listener); + assertNull(listener.response); + assertNotNull(listener.failure); + })); } public void testDocumentFailureInShardOperationOnReplica() throws Exception { @@ -426,15 +431,15 @@ public class TransportWriteActionTests extends ESTestCase { } @Override - protected WritePrimaryResult shardOperationOnPrimary( - TestRequest request, IndexShard primary) throws Exception { - final WritePrimaryResult primaryResult; - if (withDocumentFailureOnPrimary) { - primaryResult = new WritePrimaryResult<>(request, null, null, new RuntimeException("simulated"), primary, logger); - } else { - 
primaryResult = new WritePrimaryResult<>(request, new TestResponse(), location, null, primary, logger);
-            }
-            return primaryResult;
+        protected void shardOperationOnPrimary(
+            TestRequest request, IndexShard primary, ActionListener<WritePrimaryResult<TestRequest, TestResponse>> listener) {
+            ActionListener.completeWith(listener, () -> {
+                if (withDocumentFailureOnPrimary) {
+                    return new WritePrimaryResult<>(request, null, null, new RuntimeException("simulated"), primary, logger);
+                } else {
+                    return new WritePrimaryResult<>(request, new TestResponse(), location, null, primary, logger);
+                }
+            });
         }
 
         @Override
diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java
index 97fc9e9defa..cec3c05b284 100644
--- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java
+++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java
@@ -18,6 +18,7 @@
 package org.elasticsearch.index.seqno;
 
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.ActionTestUtils;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
@@ -113,7 +114,7 @@ public class GlobalCheckpointSyncActionTests extends ESTestCase {
                 new IndexNameExpressionResolver());
         final GlobalCheckpointSyncAction.Request primaryRequest = new GlobalCheckpointSyncAction.Request(indexShard.shardId());
         if (randomBoolean()) {
-            action.shardOperationOnPrimary(primaryRequest, indexShard);
+            action.shardOperationOnPrimary(primaryRequest, indexShard, ActionTestUtils.assertNoFailureListener(r -> {}));
         } else {
             action.shardOperationOnReplica(new GlobalCheckpointSyncAction.Request(indexShard.shardId()), indexShard);
         }
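All of these conversions follow one mechanical recipe: a primary-side operation that used to compute and return a PrimaryResult now receives an ActionListener and completes it instead, with ActionListener.completeWith turning a thrown exception into a call to listener.onFailure. A minimal before/after sketch of the recipe, assuming TestRequest/TestResponse fixtures like the ones in the test actions above:

    // Before: synchronous; an exception propagates straight to the caller.
    protected PrimaryResult<TestRequest, TestResponse> shardOperationOnPrimary(
            TestRequest request, IndexShard primary) throws Exception {
        return new PrimaryResult<>(request, new TestResponse());
    }

    // After: listener-based; completeWith invokes the supplier inline, routing
    // the result to onResponse and any thrown exception to onFailure.
    protected void shardOperationOnPrimary(TestRequest request, IndexShard primary,
            ActionListener<PrimaryResult<TestRequest, TestResponse>> listener) {
        ActionListener.completeWith(listener, () -> new PrimaryResult<>(request, new TestResponse()));
    }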
diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java
index 81ea56c6096..f1c05c565f8 100644
--- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java
+++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java
@@ -21,8 +21,9 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.LatchedActionListener;
 import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.replication.ReplicationOperation;
+import org.elasticsearch.action.support.ActionTestUtils;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.support.replication.TransportReplicationAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
@@ -47,6 +48,7 @@
 import org.elasticsearch.transport.TransportService;
 import org.mockito.ArgumentCaptor;
 
 import java.util.Collections;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.elasticsearch.mock.orig.Mockito.verifyNoMoreInteractions;
@@ -67,6 +69,7 @@ public class RetentionLeaseBackgroundSyncActionTests extends ESTestCase {
     private TransportService transportService;
     private ShardStateAction shardStateAction;
 
+    @Override
     public void setUp() throws Exception {
         super.setUp();
         threadPool = new TestThreadPool(getClass().getName());
@@ -84,6 +87,7 @@
         shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool);
     }
 
+    @Override
     public void tearDown() throws Exception {
         try {
             IOUtils.close(transportService, clusterService, transport);
@@ -93,7 +97,7 @@
         super.tearDown();
     }
 
-    public void testRetentionLeaseBackgroundSyncActionOnPrimary() throws WriteStateException {
+    public void testRetentionLeaseBackgroundSyncActionOnPrimary() throws InterruptedException {
         final IndicesService indicesService = mock(IndicesService.class);
 
         final Index index = new Index("index", "uuid");
@@ -120,12 +124,15 @@
         final RetentionLeaseBackgroundSyncAction.Request request =
             new RetentionLeaseBackgroundSyncAction.Request(indexShard.shardId(), retentionLeases);
 
-        final ReplicationOperation.PrimaryResult<RetentionLeaseBackgroundSyncAction.Request> result =
-            action.shardOperationOnPrimary(request, indexShard);
-        // the retention leases on the shard should be persisted
-        verify(indexShard).persistRetentionLeases();
-        // we should forward the request containing the current retention leases to the replica
-        assertThat(result.replicaRequest(), sameInstance(request));
+        final CountDownLatch latch = new CountDownLatch(1);
+        action.shardOperationOnPrimary(request, indexShard,
+            new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> {
+                // the retention leases on the shard should be persisted
+                verify(indexShard).persistRetentionLeases();
+                // we should forward the request containing the current retention leases to the replica
+                assertThat(result.replicaRequest(), sameInstance(request));
+            }), latch));
+        latch.await();
     }
 
     public void testRetentionLeaseBackgroundSyncActionOnReplica() throws WriteStateException {
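The CountDownLatch in the rewritten test is what keeps it deterministic: LatchedActionListener counts the latch down after delegating either onResponse or onFailure, so latch.await() cannot hang on the failure path. An equivalent pattern, sketched here with a hypothetical helper that is not part of this patch, is to hand the operation a PlainActionFuture, which both blocks and rethrows any failure:

    // Hypothetical test helper: run a listener-based operation and block for its result.
    static <T> T runBlocking(CheckedConsumer<ActionListener<T>, Exception> operation) throws Exception {
        final PlainActionFuture<T> future = new PlainActionFuture<>();
        operation.accept(future); // e.g. listener -> action.shardOperationOnPrimary(request, indexShard, listener)
        return future.actionGet(); // rethrows the failure, if any, instead of hanging
    }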
diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java
index 9b9ad6a0962..73387d69b1e 100644
--- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java
+++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java
@@ -22,6 +22,7 @@
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.ActionTestUtils;
 import org.elasticsearch.action.support.replication.TransportWriteAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -90,7 +91,7 @@
         super.tearDown();
     }
 
-    public void testRetentionLeaseSyncActionOnPrimary() throws WriteStateException {
+    public void testRetentionLeaseSyncActionOnPrimary() {
         final IndicesService indicesService = mock(IndicesService.class);
 
         final Index index = new Index("index", "uuid");
@@ -115,15 +116,16 @@
             new IndexNameExpressionResolver());
         final RetentionLeases retentionLeases = mock(RetentionLeases.class);
         final RetentionLeaseSyncAction.Request request =
             new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases);
-
-        final TransportWriteAction.WritePrimaryResult<RetentionLeaseSyncAction.Request, RetentionLeaseSyncAction.Response> result =
-            action.shardOperationOnPrimary(request, indexShard);
-        // the retention leases on the shard should be persisted
-        verify(indexShard).persistRetentionLeases();
-        // we should forward the request containing the current retention leases to the replica
-        assertThat(result.replicaRequest(), sameInstance(request));
-        // we should start with an empty replication response
-        assertNull(result.finalResponseIfSuccessful.getShardInfo());
+        action.shardOperationOnPrimary(request, indexShard,
+            ActionTestUtils.assertNoFailureListener(result -> {
+                // the retention leases on the shard should be persisted
+                verify(indexShard).persistRetentionLeases();
+                // we should forward the request containing the current retention leases to the replica
+                assertThat(result.replicaRequest(), sameInstance(request));
+                // we should start with an empty replication response
+                assertNull(result.finalResponseIfSuccessful.getShardInfo());
+            }
+        ));
     }
 
     public void testRetentionLeaseSyncActionOnReplica() throws WriteStateException {
@@ -156,7 +158,7 @@
         action.shardOperationOnReplica(request, indexShard);
         // the retention leases on the shard should be updated
         verify(indexShard).updateRetentionLeasesOnReplica(retentionLeases);
-        // the retention leases on the shard should be persisteed
+        // the retention leases on the shard should be persisted
         verify(indexShard).persistRetentionLeases();
         // the result should indicate success
         final AtomicBoolean success = new AtomicBoolean();
diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java b/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java
index cdc76292be0..55c1acabd3f 100644
--- a/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java
@@ -19,8 +19,10 @@
 
 package org.elasticsearch.action.support;
 
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.CheckedConsumer;
 
 import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
 
@@ -34,4 +36,10 @@ public class ActionTestUtils {
         action.execute(request, future);
         return future.actionGet();
     }
+
+    public static <T> ActionListener<T> assertNoFailureListener(CheckedConsumer<T, Exception> consumer) {
+        return ActionListener.wrap(consumer, e -> {
+            throw new AssertionError(e);
+        });
+    }
 }
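assertNoFailureListener, added just above, is what lets the rewritten tests keep their assertions inline: the consumer runs against the response, a failed operation is rethrown as an AssertionError wrapping the original exception, and an AssertionError thrown by an assertion inside the consumer propagates as usual (ActionListener.wrap only catches checked Exceptions). Typical usage, matching the call sites elsewhere in this patch:

    // Assert on the result of a listener-based primary operation in one expression;
    // any failure of the operation fails the test via AssertionError.
    action.shardOperationOnPrimary(request, indexShard,
        ActionTestUtils.assertNoFailureListener(result ->
            assertThat(result.replicaRequest(), sameInstance(request))));

One caveat carried by this style: if the action completes its listener on another thread, the assertions run there too, which is why the background-sync test above adds a latch.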
diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
index 9b70aefa56b..c78079169ed 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
@@ -36,12 +36,14 @@
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.resync.ResyncReplicationRequest;
 import org.elasticsearch.action.resync.ResyncReplicationResponse;
 import org.elasticsearch.action.resync.TransportResyncReplicationAction;
+import org.elasticsearch.action.support.ActionTestUtils;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
 import org.elasticsearch.action.support.replication.ReplicationOperation;
 import org.elasticsearch.action.support.replication.ReplicationRequest;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
+import org.elasticsearch.action.support.replication.TransportReplicationAction;
 import org.elasticsearch.action.support.replication.TransportReplicationAction.ReplicaResponse;
 import org.elasticsearch.action.support.replication.TransportWriteAction;
 import org.elasticsearch.action.support.replication.TransportWriteActionTestHelper;
@@ -189,14 +191,15 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
             sync(shardId, retentionLeases, ActionListener.wrap(
                 r -> { },
                 e -> {
-                    throw new AssertionError("failed to backgroun sync retention lease", e);
+                    throw new AssertionError("failed to background sync retention lease", e);
                 }));
         }
     };
 
     protected ReplicationGroup(final IndexMetaData indexMetaData) throws IOException {
         final ShardRouting primaryRouting = this.createShardRouting("s0", true);
-        primary = newShard(primaryRouting, indexMetaData, null, getEngineFactory(primaryRouting), () -> {}, retentionLeaseSyncer);
+        primary = newShard(
+            primaryRouting, indexMetaData, null, getEngineFactory(primaryRouting), () -> {}, retentionLeaseSyncer);
         replicas = new CopyOnWriteArrayList<>();
         this.indexMetaData = indexMetaData;
         updateAllocationIDsOnPrimary();
@@ -252,9 +255,8 @@
     private BulkItemResponse executeWriteRequest(
         DocWriteRequest<?> writeRequest, WriteRequest.RefreshPolicy refreshPolicy) throws Exception {
         PlainActionFuture<BulkItemResponse> listener = new PlainActionFuture<>();
-        final ActionListener<BulkShardResponse> wrapBulkListener = ActionListener.wrap(
-            bulkShardResponse -> listener.onResponse(bulkShardResponse.getResponses()[0]),
-            listener::onFailure);
+        final ActionListener<BulkShardResponse> wrapBulkListener =
+            ActionListener.map(listener, bulkShardResponse -> bulkShardResponse.getResponses()[0]);
         BulkItemRequest[] items = new BulkItemRequest[1];
         items[0] = new BulkItemRequest(0, writeRequest);
         BulkShardRequest request = new BulkShardRequest(shardId, refreshPolicy, items);
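ActionListener.map, used in the hunk above, is shorthand for essentially the wrap(...) pattern it replaces: it returns a listener that applies a mapping function to the response, hands the mapped value to the delegate, and routes failures (including, in this era of the codebase, an exception thrown by the mapping function itself) to the delegate's onFailure. Roughly, under that reading:

    // ActionListener.map(listener, fn) behaves like:
    ActionListener.wrap(response -> listener.onResponse(fn.apply(response)), listener::onFailure);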
@@ -307,8 +309,7 @@
     }
 
     public synchronized void addReplica(IndexShard replica) throws IOException {
-        assert shardRoutings().stream()
-            .filter(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())).findFirst().isPresent() == false :
+        assert shardRoutings().stream().anyMatch(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())) == false :
             "replica with aId [" + replica.routingEntry().allocationId() + "] already exists";
         replicas.add(replica);
         if (replicationTargets != null) {
@@ -433,7 +434,7 @@
     }
 
     public Future<Void> asyncRecoverReplica(
-        final IndexShard replica, final BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier) throws IOException {
+        final IndexShard replica, final BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier) {
         final FutureTask<Void> task = new FutureTask<>(() -> {
             recoverReplica(replica, targetSupplier);
             return null;
@@ -531,10 +532,8 @@
     }
 
     protected void syncRetentionLeases(ShardId shardId, RetentionLeases leases, ActionListener<ReplicationResponse> listener) {
-        RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(shardId, leases);
-        ActionListener<RetentionLeaseSyncAction.Response> wrappedListener = ActionListener.wrap(
-            r -> listener.onResponse(new ReplicationResponse()), listener::onFailure);
-        new SyncRetentionLeases(request, ReplicationGroup.this, wrappedListener).execute();
+        new SyncRetentionLeases(new RetentionLeaseSyncAction.Request(shardId, leases), this,
+            ActionListener.map(listener, r -> new ReplicationResponse())).execute();
     }
 
     public synchronized RetentionLease addRetentionLease(String id, long retainingSequenceNumber, String source,
@@ -609,17 +608,8 @@
         public void execute() {
             try {
                 new ReplicationOperation<>(request, new PrimaryRef(),
-                    new ActionListener<PrimaryResult>() {
-                        @Override
-                        public void onResponse(PrimaryResult result) {
-                            result.respond(listener);
-                        }
-
-                        @Override
-                        public void onFailure(Exception e) {
-                            listener.onFailure(e);
-                        }
-                    }, new ReplicasRef(), logger, opType).execute();
+                    ActionListener.wrap(result -> result.respond(listener), listener::onFailure), new ReplicasRef(), logger, opType
+                ).execute();
             } catch (Exception e) {
                 listener.onFailure(e);
             }
@@ -629,7 +619,7 @@
             return replicationTargets.primary;
         }
 
-        protected abstract PrimaryResult performOnPrimary(IndexShard primary, Request request) throws Exception;
+        protected abstract void performOnPrimary(IndexShard primary, Request request, ActionListener<PrimaryResult> listener);
 
         protected abstract void performOnReplica(ReplicaRequest request, IndexShard replica) throws Exception;
 
@@ -646,8 +636,8 @@
         }
 
         @Override
-        public PrimaryResult perform(Request request) throws Exception {
-            return performOnPrimary(getPrimaryShard(), request);
+        public void perform(Request request, ActionListener<PrimaryResult> listener) {
+            performOnPrimary(getPrimaryShard(), request, listener);
         }
 
         @Override
@@ -761,10 +751,9 @@
         }
 
         @Override
-        protected PrimaryResult performOnPrimary(IndexShard primary, BulkShardRequest request) throws Exception {
-            final TransportWriteAction.WritePrimaryResult<BulkShardRequest, BulkShardResponse>
-                result = executeShardBulkOnPrimary(primary, request);
-            return new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful);
+        protected void performOnPrimary(IndexShard primary, BulkShardRequest request, ActionListener<PrimaryResult> listener) {
+            executeShardBulkOnPrimary(primary, request,
+                ActionListener.map(listener, result -> new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful)));
         }
 
         @Override
@@ -774,8 +763,8 @@
         }
     }
 
-    private TransportWriteAction.WritePrimaryResult<BulkShardRequest, BulkShardResponse> executeShardBulkOnPrimary(
-        IndexShard primary, BulkShardRequest request) throws Exception {
+    private void executeShardBulkOnPrimary(IndexShard primary, BulkShardRequest request,
+            ActionListener<TransportWriteAction.WritePrimaryResult<BulkShardRequest, BulkShardResponse>> listener) {
         for (BulkItemRequest itemRequest : request.items()) {
             if (itemRequest.request() instanceof IndexRequest) {
                 ((IndexRequest) itemRequest.request()).process(Version.CURRENT, null, index.getName());
@@ -783,21 +772,27 @@
             }
         }
         final PlainActionFuture<Releasable> permitAcquiredFuture = new PlainActionFuture<>();
         primary.acquirePrimaryOperationPermit(permitAcquiredFuture, ThreadPool.Names.SAME, request);
-        final
TransportWriteAction.WritePrimaryResult result; try (Releasable ignored = permitAcquiredFuture.actionGet()) { - MappingUpdatePerformer noopMappingUpdater = (update, shardId, type) -> { }; - result = TransportShardBulkAction.performOnPrimary(request, primary, null, System::currentTimeMillis, noopMappingUpdater, - null); + MappingUpdatePerformer noopMappingUpdater = (update, shardId, type, listener1) -> {}; + TransportShardBulkAction.performOnPrimary(request, primary, null, System::currentTimeMillis, noopMappingUpdater, + null, ActionTestUtils.assertNoFailureListener(result -> { + TransportWriteActionTestHelper.performPostWriteActions(primary, request, + ((TransportWriteAction.WritePrimaryResult) result).location, logger); + listener.onResponse((TransportWriteAction.WritePrimaryResult) result); + }), threadPool); + } catch (Exception e) { + listener.onFailure(e); } - TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.location, logger); - return result; } - private - BulkShardRequest executeReplicationRequestOnPrimary(IndexShard primary, Request request) throws Exception { + private BulkShardRequest executeReplicationRequestOnPrimary( + IndexShard primary, Request request) throws Exception { final BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, request.getRefreshPolicy(), new BulkItemRequest[]{new BulkItemRequest(0, request)}); - return executeShardBulkOnPrimary(primary, bulkShardRequest).replicaRequest(); + final PlainActionFuture res = new PlainActionFuture<>(); + executeShardBulkOnPrimary( + primary, bulkShardRequest, ActionListener.map(res, TransportReplicationAction.PrimaryResult::replicaRequest)); + return res.get(); } private void executeShardBulkOnReplica(BulkShardRequest request, IndexShard replica, long operationPrimaryTerm, @@ -860,10 +855,12 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } @Override - protected PrimaryResult performOnPrimary( - final IndexShard primary, final GlobalCheckpointSyncAction.Request request) throws Exception { - primary.sync(); - return new PrimaryResult(request, new ReplicationResponse()); + protected void performOnPrimary(IndexShard primary, GlobalCheckpointSyncAction.Request request, + ActionListener listener) { + ActionListener.completeWith(listener, () -> { + primary.sync(); + return new PrimaryResult(request, new ReplicationResponse()); + }); } @Override @@ -879,10 +876,12 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } @Override - protected PrimaryResult performOnPrimary(IndexShard primary, ResyncReplicationRequest request) throws Exception { - final TransportWriteAction.WritePrimaryResult result = - executeResyncOnPrimary(primary, request); - return new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful); + protected void performOnPrimary(IndexShard primary, ResyncReplicationRequest request, ActionListener listener) { + ActionListener.completeWith(listener, () -> { + final TransportWriteAction.WritePrimaryResult result = + executeResyncOnPrimary(primary, request); + return new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful); + }); } @Override @@ -895,7 +894,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase private TransportWriteAction.WritePrimaryResult executeResyncOnPrimary( IndexShard primary, ResyncReplicationRequest request) throws Exception { final TransportWriteAction.WritePrimaryResult result = - new 
TransportWriteAction.WritePrimaryResult<>(TransportResyncReplicationAction.performOnPrimary(request, primary), + new TransportWriteAction.WritePrimaryResult<>(TransportResyncReplicationAction.performOnPrimary(request), new ResyncReplicationResponse(), null, null, primary, logger); TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.location, logger); return result; @@ -922,9 +921,12 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } @Override - protected PrimaryResult performOnPrimary(IndexShard primary, RetentionLeaseSyncAction.Request request) throws Exception { - primary.persistRetentionLeases(); - return new PrimaryResult(request, new RetentionLeaseSyncAction.Response()); + protected void performOnPrimary(IndexShard primary, RetentionLeaseSyncAction.Request request, + ActionListener listener) { + ActionListener.completeWith(listener, () -> { + primary.persistRetentionLeases(); + return new PrimaryResult(request, new RetentionLeaseSyncAction.Response()); + }); } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java index 74e9ceda670..45ffbf6998d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java @@ -61,13 +61,13 @@ public class TransportBulkShardOperationsAction } @Override - protected WritePrimaryResult shardOperationOnPrimary( - final BulkShardOperationsRequest request, final IndexShard primary) throws Exception { + protected void shardOperationOnPrimary(BulkShardOperationsRequest request, IndexShard primary, + ActionListener> listener) { if (logger.isTraceEnabled()) { logger.trace("index [{}] on the following primary shard {}", request.getOperations(), primary.routingEntry()); } - return shardOperationOnPrimary(request.shardId(), request.getHistoryUUID(), request.getOperations(), - request.getMaxSeqNoOfUpdatesOrDeletes(), primary, logger); + ActionListener.completeWith(listener, () -> shardOperationOnPrimary(request.shardId(), request.getHistoryUUID(), + request.getOperations(), request.getMaxSeqNoOfUpdatesOrDeletes(), primary, logger)); } public static Translog.Operation rewriteOperationWithPrimaryTerm(Translog.Operation operation, long primaryTerm) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index adbff7fb29f..c5a357c7df8 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -643,20 +643,22 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest } @Override - protected PrimaryResult performOnPrimary(IndexShard primary, BulkShardOperationsRequest request) throws Exception { - final PlainActionFuture permitFuture = new PlainActionFuture<>(); - primary.acquirePrimaryOperationPermit(permitFuture, ThreadPool.Names.SAME, request); - final TransportWriteAction.WritePrimaryResult ccrResult; - try (Releasable ignored = permitFuture.get()) { - ccrResult = 
TransportBulkShardOperationsAction.shardOperationOnPrimary(primary.shardId(), request.getHistoryUUID(),
-                request.getOperations(), request.getMaxSeqNoOfUpdatesOrDeletes(), primary, logger);
-            }
-            return new PrimaryResult(ccrResult.replicaRequest(), ccrResult.finalResponseIfSuccessful) {
-                @Override
-                public void respond(ActionListener<BulkShardOperationsResponse> listener) {
-                    ccrResult.respond(listener);
+        protected void performOnPrimary(IndexShard primary, BulkShardOperationsRequest request, ActionListener<PrimaryResult> listener) {
+            ActionListener.completeWith(listener, () -> {
+                final PlainActionFuture<Releasable> permitFuture = new PlainActionFuture<>();
+                primary.acquirePrimaryOperationPermit(permitFuture, ThreadPool.Names.SAME, request);
+                final TransportWriteAction.WritePrimaryResult<BulkShardOperationsRequest, BulkShardOperationsResponse> ccrResult;
+                try (Releasable ignored = permitFuture.get()) {
+                    ccrResult = TransportBulkShardOperationsAction.shardOperationOnPrimary(primary.shardId(), request.getHistoryUUID(),
+                        request.getOperations(), request.getMaxSeqNoOfUpdatesOrDeletes(), primary, logger);
                 }
-            };
+                return new PrimaryResult(ccrResult.replicaRequest(), ccrResult.finalResponseIfSuccessful) {
+                    @Override
+                    public void respond(ActionListener<BulkShardOperationsResponse> listener) {
+                        ccrResult.respond(listener);
+                    }
+                };
+            });
         }
 
         @Override

From 9e32e36799e4c770b5f96262601b6cf5680fcbdc Mon Sep 17 00:00:00 2001
From: Benjamin Trent
Date: Thu, 11 Apr 2019 11:19:56 -0500
Subject: [PATCH 51/60] [ML] fixing test related to #40963 (#41074) (#41116)

---
 .../xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java | 2 --
 1 file changed, 2 deletions(-)

diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java
index 202416a1b85..b348a9cef60 100644
--- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java
@@ -105,7 +105,6 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase {
         assertEquals(1, XContentMapValues.extractValue("count", transforms));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40963")
     @SuppressWarnings("unchecked")
     public void testGetPersistedStatsWithoutTask() throws Exception {
         createPivotReviewsTransform("pivot_stats_1", "pivot_reviews_stats_1", null);
@@ -131,7 +130,6 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase {
         for (Map<String, Object> transformStats : transformsStats) {
             Map<String, Object> stat = (Map<String, Object>) transformStats.get("stats");
             assertThat(((Integer) stat.get("documents_processed")), greaterThan(0));
-            assertThat(((Integer) stat.get("search_time_in_ms")), greaterThan(0));
             assertThat(((Integer) stat.get("search_total")), greaterThan(0));
             assertThat(((Integer) stat.get("pages_processed")), greaterThan(0));
         }

From 05cf53934ad4ecff64f6e392a471578a41e2dd02 Mon Sep 17 00:00:00 2001
From: Benjamin Trent
Date: Thu, 11 Apr 2019 13:54:41 -0500
Subject: [PATCH 52/60] [ML] checking if p-tasks metadata is null before
 updating state (#41091) (#41123)

* [ML] checking if p-tasks metadata is null before updating state

* Adding test that validates fix

* removing debug println
---
 .../org/elasticsearch/xpack/ml/MlConfigMigrator.java | 12 ++++++++----
 .../xpack/ml/integration/MlConfigMigratorIT.java     | 12 +++++++++++-
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java
index a59769f97ce..d1673dd3c91 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java
@@ -246,10 +246,14 @@ public class MlConfigMigrator {
                 currentState.metaData().custom(PersistentTasksCustomMetaData.TYPE), currentState.nodes());
 
         ClusterState.Builder newState = ClusterState.builder(currentState);
-        newState.metaData(MetaData.builder(currentState.getMetaData())
-                .putCustom(MlMetadata.TYPE, removed.mlMetadata)
-                .putCustom(PersistentTasksCustomMetaData.TYPE, updatedTasks)
-                .build());
+        MetaData.Builder metaDataBuilder = MetaData.builder(currentState.getMetaData())
+                .putCustom(MlMetadata.TYPE, removed.mlMetadata);
+
+        // If there are no tasks in the cluster state metadata to begin with, this could be null.
+        if (updatedTasks != null) {
+            metaDataBuilder = metaDataBuilder.putCustom(PersistentTasksCustomMetaData.TYPE, updatedTasks);
+        }
+        newState.metaData(metaDataBuilder.build());
 
         return newState.build();
     }
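The guard above exists because a metadata custom must never be null: if the cluster state carried no persistent-tasks metadata to begin with, the old code would have passed an explicit null to putCustom while rewriting the state. The integration test below captures every custom produced by the update task and checks exactly this invariant; the core of that check is simply:

    // Sketch of the invariant the test below enforces: after the state update,
    // every metadata custom that was written must be non-null.
    for (ObjectCursor<MetaData.Custom> cursor : result.metaData().customs().values()) {
        assertNotNull(cursor.value);
    }

(The actual test collects the values into a list and asserts that none of them is null, as shown next.)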
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java
index 0eda4d4dcad..69b82ef9846 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.ml.integration;
 
+import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.index.IndexRequest;
@@ -47,10 +48,12 @@
 import org.junit.Before;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Objects;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
@@ -148,9 +151,13 @@ public class MlConfigMigratorIT extends MlSingleNodeTestCase {
                 .routingTable(routingTable.build())
                 .build();
         when(clusterService.state()).thenReturn(clusterState);
-
+        List<MetaData.Custom> customs = new ArrayList<>();
         doAnswer(invocation -> {
             ClusterStateUpdateTask listener = (ClusterStateUpdateTask) invocation.getArguments()[1];
+            ClusterState result = listener.execute(clusterState);
+            for (ObjectCursor<MetaData.Custom> value : result.metaData().customs().values()) {
+                customs.add(value.value);
+            }
             listener.clusterStateProcessed("source", mock(ClusterState.class), mock(ClusterState.class));
             return null;
         }).when(clusterService).submitStateUpdateTask(eq("remove-migrated-ml-configs"), any());
@@ -164,6 +171,9 @@
         blockingCall(actionListener -> mlConfigMigrator.migrateConfigs(clusterState, actionListener),
             responseHolder, exceptionHolder);
 
+        // Verify that we have custom values in the new cluster state and that none of them is null
+        assertThat(customs.size(), greaterThan(0));
+        assertThat(customs.stream().anyMatch(Objects::isNull), is(false));
         assertNull(exceptionHolder.get());
         assertTrue(responseHolder.get());
         assertSnapshot(mlMetadata.build());

From e120deb08ff8689a6cd8c0af97cb92d0c3a12128 Mon Sep 17
00:00:00 2001 From: Lisa Cawley Date: Thu, 11 Apr 2019 12:04:04 -0700 Subject: [PATCH 53/60] [DOCS] Fixes callout for Asciidoctor migration (#41127) --- .../aggregations/bucket/daterange-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc index b49466b8180..fdb6e07b098 100644 --- a/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc @@ -278,7 +278,7 @@ POST /sales/_search?size=0 "time_zone": "CET", "ranges": [ { "to": "2016/02/01" }, <1> - { "from": "2016/02/01", "to" : "now/d" <2>}, + { "from": "2016/02/01", "to" : "now/d" }, <2> { "from": "now/d" } ] } From b522de975da43133f1321df24dcecbd34feecd31 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 11 Apr 2019 20:26:18 +0100 Subject: [PATCH 54/60] Move primary term from replicas proxy to repl op (#41119) A small refactoring that removes the primaryTerm field from ReplicasProxy and instead passes it directly in to the methods that need it. Relates #40706. --- ...TransportVerifyShardBeforeCloseAction.java | 12 +-- .../TransportResyncReplicationAction.java | 16 ++- .../replication/ReplicationOperation.java | 102 ++++++++++-------- .../TransportReplicationAction.java | 18 ++-- .../replication/TransportWriteAction.java | 13 +-- ...portVerifyShardBeforeCloseActionTests.java | 10 +- .../ReplicationOperationTests.java | 48 ++++----- .../TransportReplicationActionTests.java | 19 ++-- .../TransportWriteActionTests.java | 9 +- .../ESIndexLevelReplicationTestCase.java | 11 +- 10 files changed, 130 insertions(+), 128 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index f29bf6987a0..7d691717de1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -115,8 +115,8 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA } @Override - protected ReplicationOperation.Replicas newReplicasProxy(final long primaryTerm) { - return new VerifyShardBeforeCloseActionReplicasProxy(primaryTerm); + protected ReplicationOperation.Replicas newReplicasProxy() { + return new VerifyShardBeforeCloseActionReplicasProxy(); } /** @@ -125,13 +125,9 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA * or reopened in an unverified state with potential non flushed translog operations. 
*/ class VerifyShardBeforeCloseActionReplicasProxy extends ReplicasProxy { - - VerifyShardBeforeCloseActionReplicasProxy(final long primaryTerm) { - super(primaryTerm); - } - @Override - public void markShardCopyAsStaleIfNeeded(final ShardId shardId, final String allocationId, final ActionListener listener) { + public void markShardCopyAsStaleIfNeeded(final ShardId shardId, final String allocationId, final long primaryTerm, + final ActionListener listener) { shardStateAction.remoteShardFailed(shardId, allocationId, primaryTerm, true, "mark copy as stale", null, listener); } } diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index bfe32749961..464cd3168bf 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -68,8 +68,8 @@ public class TransportResyncReplicationAction extends TransportWriteAction shardOperationOnReplica(ResyncReplicationRequest request, + IndexShard replica) throws Exception { Translog.Location location = performOnReplica(request, replica); - return new WriteReplicaResult(request, location, null, replica, logger); + return new WriteReplicaResult<>(request, location, null, replica, logger); } public static Translog.Location performOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception { @@ -174,12 +175,9 @@ public class TransportResyncReplicationAction extends TransportWriteAction listener) { + public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception, + ActionListener listener) { shardStateAction.remoteShardFailed( replica.shardId(), replica.allocationId().getId(), primaryTerm, false, message, exception, listener); } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index c8c102dfd85..8f5d2a2ca53 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -71,7 +71,10 @@ public class ReplicationOperation< private final Primary primary; private final Replicas replicasProxy; private final AtomicBoolean finished = new AtomicBoolean(); - protected final ActionListener resultListener; + private final long primaryTerm; + + // exposed for tests + final ActionListener resultListener; private volatile PrimaryResultT primaryResult = null; @@ -80,13 +83,14 @@ public class ReplicationOperation< public ReplicationOperation(Request request, Primary primary, ActionListener listener, Replicas replicas, - Logger logger, String opType) { + Logger logger, String opType, long primaryTerm) { this.replicasProxy = replicas; this.primary = primary; this.resultListener = listener; this.logger = logger; this.request = request; this.opType = opType; + this.primaryTerm = primaryTerm; } public void execute() throws Exception { @@ -137,7 +141,7 @@ public class ReplicationOperation< // if inSyncAllocationIds contains allocation ids of shards that don't exist in RoutingTable, mark copies as stale for (String allocationId : replicationGroup.getUnavailableInSyncShards()) { pendingActions.incrementAndGet(); - 
replicasProxy.markShardCopyAsStaleIfNeeded(replicaRequest.shardId(), allocationId, + replicasProxy.markShardCopyAsStaleIfNeeded(replicaRequest.shardId(), allocationId, primaryTerm, ActionListener.wrap(r -> decPendingAndFinishIfNeeded(), ReplicationOperation.this::onNoLongerPrimary)); } } @@ -165,44 +169,45 @@ public class ReplicationOperation< totalShards.incrementAndGet(); pendingActions.incrementAndGet(); - replicasProxy.performOn(shard, replicaRequest, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, new ActionListener() { - @Override - public void onResponse(ReplicaResponse response) { - successfulShards.incrementAndGet(); - try { - primary.updateLocalCheckpointForShard(shard.allocationId().getId(), response.localCheckpoint()); - primary.updateGlobalCheckpointForShard(shard.allocationId().getId(), response.globalCheckpoint()); - } catch (final AlreadyClosedException e) { - // okay, the index was deleted or this shard was never activated after a relocation; fall through and finish normally - } catch (final Exception e) { - // fail the primary but fall through and let the rest of operation processing complete - final String message = String.format(Locale.ROOT, "primary failed updating local checkpoint for replica %s", shard); - primary.failShard(message, e); + replicasProxy.performOn(shard, replicaRequest, primaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, + new ActionListener() { + @Override + public void onResponse(ReplicaResponse response) { + successfulShards.incrementAndGet(); + try { + primary.updateLocalCheckpointForShard(shard.allocationId().getId(), response.localCheckpoint()); + primary.updateGlobalCheckpointForShard(shard.allocationId().getId(), response.globalCheckpoint()); + } catch (final AlreadyClosedException e) { + // the index was deleted or this shard was never activated after a relocation; fall through and finish normally + } catch (final Exception e) { + // fail the primary but fall through and let the rest of operation processing complete + final String message = String.format(Locale.ROOT, "primary failed updating local checkpoint for replica %s", shard); + primary.failShard(message, e); + } + decPendingAndFinishIfNeeded(); } - decPendingAndFinishIfNeeded(); - } - @Override - public void onFailure(Exception replicaException) { - logger.trace(() -> new ParameterizedMessage( - "[{}] failure while performing [{}] on replica {}, request [{}]", - shard.shardId(), opType, shard, replicaRequest), replicaException); - // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report. - if (TransportActions.isShardNotAvailableException(replicaException) == false) { - RestStatus restStatus = ExceptionsHelper.status(replicaException); - shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure( - shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false)); + @Override + public void onFailure(Exception replicaException) { + logger.trace(() -> new ParameterizedMessage( + "[{}] failure while performing [{}] on replica {}, request [{}]", + shard.shardId(), opType, shard, replicaRequest), replicaException); + // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report. 
+ if (TransportActions.isShardNotAvailableException(replicaException) == false) { + RestStatus restStatus = ExceptionsHelper.status(replicaException); + shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure( + shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false)); + } + String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard); + replicasProxy.failShardIfNeeded(shard, primaryTerm, message, replicaException, + ActionListener.wrap(r -> decPendingAndFinishIfNeeded(), ReplicationOperation.this::onNoLongerPrimary)); } - String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard); - replicasProxy.failShardIfNeeded(shard, message, replicaException, - ActionListener.wrap(r -> decPendingAndFinishIfNeeded(), ReplicationOperation.this::onNoLongerPrimary)); - } - @Override - public String toString() { - return "[" + replicaRequest + "][" + shard + "]"; - } - }); + @Override + public String toString() { + return "[" + replicaRequest + "][" + shard + "]"; + } + }); } private void onNoLongerPrimary(Exception failure) { @@ -373,25 +378,27 @@ public class ReplicationOperation< * * @param replica the shard this request should be executed on * @param replicaRequest the operation to perform + * @param primaryTerm the primary term * @param globalCheckpoint the global checkpoint on the primary * @param maxSeqNoOfUpdatesOrDeletes the max seq_no of updates (index operations overwriting Lucene) or deletes on primary * after this replication was executed on it. * @param listener callback for handling the response or failure */ - void performOn(ShardRouting replica, RequestT replicaRequest, long globalCheckpoint, - long maxSeqNoOfUpdatesOrDeletes, ActionListener listener); + void performOn(ShardRouting replica, RequestT replicaRequest, + long primaryTerm, long globalCheckpoint, long maxSeqNoOfUpdatesOrDeletes, ActionListener listener); /** * Fail the specified shard if needed, removing it from the current set * of active shards. Whether a failure is needed is left up to the * implementation. 
* - * @param replica shard to fail - * @param message a (short) description of the reason - * @param exception the original exception which caused the ReplicationOperation to request the shard to be failed - * @param listener a listener that will be notified when the failing shard has been removed from the in-sync set + * @param replica shard to fail + * @param primaryTerm the primary term + * @param message a (short) description of the reason + * @param exception the original exception which caused the ReplicationOperation to request the shard to be failed + * @param listener a listener that will be notified when the failing shard has been removed from the in-sync set */ - void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener); + void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception, ActionListener listener); /** * Marks shard copy as stale if needed, removing its allocation id from @@ -400,9 +407,10 @@ public class ReplicationOperation< * * @param shardId shard id * @param allocationId allocation id to remove from the set of in-sync allocation ids + * @param primaryTerm the primary term * @param listener a listener that will be notified when the failing shard has been removed from the in-sync set */ - void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener listener); + void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, long primaryTerm, ActionListener listener); } /** @@ -427,11 +435,11 @@ public class ReplicationOperation< } public static class RetryOnPrimaryException extends ElasticsearchException { - public RetryOnPrimaryException(ShardId shardId, String msg) { + RetryOnPrimaryException(ShardId shardId, String msg) { this(shardId, msg, null); } - public RetryOnPrimaryException(ShardId shardId, String msg, Throwable cause) { + RetryOnPrimaryException(ShardId shardId, String msg, Throwable cause) { super(msg, cause); setShard(shardId); } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 2e3440a9142..82dd77d6b58 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -164,8 +164,8 @@ public abstract class TransportReplicationAction< new ReroutePhase((ReplicationTask) task, request, listener).run(); } - protected ReplicationOperation.Replicas newReplicasProxy(long primaryTerm) { - return new ReplicasProxy(primaryTerm); + protected ReplicationOperation.Replicas newReplicasProxy() { + return new ReplicasProxy(); } protected abstract Response newResponseInstance(); @@ -410,7 +410,7 @@ public abstract class TransportReplicationAction< Request request, ActionListener> listener, PrimaryShardReference primaryShardReference) { return new ReplicationOperation<>(request, primaryShardReference, listener, - newReplicasProxy(primaryRequest.getPrimaryTerm()), logger, actionName); + newReplicasProxy(), logger, actionName, primaryRequest.getPrimaryTerm()); } } @@ -1035,16 +1035,11 @@ public abstract class TransportReplicationAction< */ protected class ReplicasProxy implements ReplicationOperation.Replicas { - protected final long primaryTerm; - - public ReplicasProxy(long primaryTerm) { - this.primaryTerm = primaryTerm; - } - @Override 
public void performOn( final ShardRouting replica, final ReplicaRequest request, + final long primaryTerm, final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes, final ActionListener listener) { @@ -1065,7 +1060,8 @@ public abstract class TransportReplicationAction< } @Override - public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener) { + public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception, + ActionListener listener) { // This does not need to fail the shard. The idea is that this // is a non-write operation (something like a refresh or a global // checkpoint sync) and therefore the replica should still be @@ -1074,7 +1070,7 @@ public abstract class TransportReplicationAction< } @Override - public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener listener) { + public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, long primaryTerm, ActionListener listener) { // This does not need to make the shard stale. The idea is that this // is a non-write operation (something like a refresh or a global // checkpoint sync) and therefore the replica should still be diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 15c49d10303..86e2760c901 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -96,8 +96,8 @@ public abstract class TransportWriteAction< } @Override - protected ReplicationOperation.Replicas newReplicasProxy(long primaryTerm) { - return new WriteActionReplicasProxy(primaryTerm); + protected ReplicationOperation.Replicas newReplicasProxy() { + return new WriteActionReplicasProxy(); } /** @@ -371,12 +371,9 @@ public abstract class TransportWriteAction< */ class WriteActionReplicasProxy extends ReplicasProxy { - WriteActionReplicasProxy(long primaryTerm) { - super(primaryTerm); - } - @Override - public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener) { + public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception, + ActionListener listener) { if (TransportActions.isShardNotAvailableException(exception) == false) { logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception); } @@ -385,7 +382,7 @@ public abstract class TransportWriteAction< } @Override - public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener listener) { + public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, long primaryTerm, ActionListener listener) { shardStateAction.remoteShardFailed(shardId, allocationId, primaryTerm, true, "mark copy as stale", null, listener); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index f90f40311c1..dcdfbb755ba 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -139,7 +139,7 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase { final TaskId taskId = new TaskId("_node_id", randomNonNegativeLong()); final TransportVerifyShardBeforeCloseAction.ShardRequest request = new TransportVerifyShardBeforeCloseAction.ShardRequest(indexShard.shardId(), clusterBlock, taskId); - final PlainActionFuture res = PlainActionFuture.newFuture(); + final PlainActionFuture res = PlainActionFuture.newFuture(); action.shardOperationOnPrimary(request, indexShard, ActionListener.wrap( r -> { assertNotNull(r); @@ -228,10 +228,10 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase { TaskId taskId = new TaskId(clusterService.localNode().getId(), 0L); TransportVerifyShardBeforeCloseAction.ShardRequest request = new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, clusterBlock, taskId); - ReplicationOperation.Replicas proxy = action.newReplicasProxy(primaryTerm); + ReplicationOperation.Replicas proxy = action.newReplicasProxy(); ReplicationOperation operation = - new ReplicationOperation<>(request, createPrimary(primaryRouting, replicationGroup), listener, proxy, logger, "test"); + TransportVerifyShardBeforeCloseAction.ShardRequest, PrimaryResult> operation = new ReplicationOperation<>( + request, createPrimary(primaryRouting, replicationGroup), listener, proxy, logger, "test", primaryTerm); operation.execute(); final CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); @@ -284,7 +284,7 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase { @Override public void perform( - TransportVerifyShardBeforeCloseAction.ShardRequest request, ActionListener listener) { + TransportVerifyShardBeforeCloseAction.ShardRequest request, ActionListener listener) { listener.onResponse(new PrimaryResult(request)); } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index d493b703372..c959e3ed45d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -116,10 +116,10 @@ public class ReplicationOperationTests extends ESTestCase { Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); - final TestReplicaProxy replicasProxy = new TestReplicaProxy(primaryTerm, simulatedFailures); + final TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); final TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup); - final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy); + final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy, primaryTerm); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); assertThat(request.processedOnReplicas, equalTo(expectedReplicas)); @@ -213,12 +213,12 @@ public class ReplicationOperationTests extends ESTestCase { } else { shardActionFailure = new ShardStateAction.NoLongerPrimaryShardException(failedReplica.shardId(), "the king is dead"); } - final TestReplicaProxy replicasProxy = new 
TestReplicaProxy(primaryTerm, expectedFailures) { + final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures) { @Override - public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, + public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception, ActionListener shardActionListener) { if (testPrimaryDemotedOnStaleShardCopies) { - super.failShardIfNeeded(replica, message, exception, shardActionListener); + super.failShardIfNeeded(replica, primaryTerm, message, exception, shardActionListener); } else { assertThat(replica, equalTo(failedReplica)); shardActionListener.onFailure(shardActionFailure); @@ -226,11 +226,12 @@ public class ReplicationOperationTests extends ESTestCase { } @Override - public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener shardActionListener) { + public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, long primaryTerm, + ActionListener shardActionListener) { if (testPrimaryDemotedOnStaleShardCopies) { shardActionListener.onFailure(shardActionFailure); } else { - super.markShardCopyAsStaleIfNeeded(shardId, allocationId, shardActionListener); + super.markShardCopyAsStaleIfNeeded(shardId, allocationId, primaryTerm, shardActionListener); } } }; @@ -242,7 +243,7 @@ public class ReplicationOperationTests extends ESTestCase { assertTrue(primaryFailed.compareAndSet(false, true)); } }; - final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy); + final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy, primaryTerm); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); @@ -299,7 +300,7 @@ public class ReplicationOperationTests extends ESTestCase { Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, - new TestReplicaProxy(primaryTerm)); + new TestReplicaProxy(), primaryTerm); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); @@ -343,7 +344,7 @@ public class ReplicationOperationTests extends ESTestCase { final ShardRouting primaryShard = shardRoutingTable.primaryShard(); final TestReplicationOperation op = new TestReplicationOperation(request, new TestPrimary(primaryShard, () -> initialReplicationGroup), - listener, new TestReplicaProxy(primaryTerm), logger, "test"); + listener, new TestReplicaProxy(), logger, "test", primaryTerm); if (passesActiveShardCheck) { assertThat(op.checkActiveShardCount(), nullValue()); @@ -401,8 +402,8 @@ public class ReplicationOperationTests extends ESTestCase { }; final PlainActionFuture listener = new PlainActionFuture<>(); - final ReplicationOperation.Replicas replicas = new TestReplicaProxy(primaryTerm, Collections.emptyMap()); - TestReplicationOperation operation = new TestReplicationOperation(request, primary, listener, replicas); + final ReplicationOperation.Replicas replicas = new TestReplicaProxy(Collections.emptyMap()); + TestReplicationOperation operation = new TestReplicationOperation(request, primary, listener, replicas, primaryTerm); operation.execute(); assertThat(primaryFailed.get(), equalTo(fatal)); @@ -577,14 +578,11 @@ public class ReplicationOperationTests extends ESTestCase { final Set markedAsStaleCopies = 
ConcurrentCollections.newConcurrentSet(); - final long primaryTerm; - - TestReplicaProxy(long primaryTerm) { - this(primaryTerm, Collections.emptyMap()); + TestReplicaProxy() { + this(Collections.emptyMap()); } - TestReplicaProxy(long primaryTerm, Map opFailures) { - this.primaryTerm = primaryTerm; + TestReplicaProxy(Map opFailures) { this.opFailures = opFailures; } @@ -592,6 +590,7 @@ public class ReplicationOperationTests extends ESTestCase { public void performOn( final ShardRouting replica, final Request request, + final long primaryTerm, final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes, final ActionListener listener) { @@ -609,7 +608,8 @@ public class ReplicationOperationTests extends ESTestCase { } @Override - public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener) { + public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception, + ActionListener listener) { if (failedReplicas.add(replica) == false) { fail("replica [" + replica + "] was failed twice"); } @@ -621,7 +621,7 @@ public class ReplicationOperationTests extends ESTestCase { } @Override - public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener listener) { + public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, long primaryTerm, ActionListener listener) { if (markedAsStaleCopies.add(allocationId) == false) { fail("replica [" + allocationId + "] was marked as stale twice"); } @@ -631,14 +631,14 @@ public class ReplicationOperationTests extends ESTestCase { class TestReplicationOperation extends ReplicationOperation { TestReplicationOperation(Request request, Primary primary, - ActionListener listener, Replicas replicas) { - this(request, primary, listener, replicas, ReplicationOperationTests.this.logger, "test"); + ActionListener listener, Replicas replicas, long primaryTerm) { + this(request, primary, listener, replicas, ReplicationOperationTests.this.logger, "test", primaryTerm); } TestReplicationOperation(Request request, Primary primary, ActionListener listener, - Replicas replicas, Logger logger, String opType) { - super(request, primary, listener, replicas, logger, opType); + Replicas replicas, Logger logger, String opType, long primaryTerm) { + super(request, primary, listener, replicas, logger, opType, primaryTerm); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index e256ec4e926..2cdd3ad2fe4 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -603,7 +603,7 @@ public class TransportReplicationActionTests extends ESTestCase { Request request, ActionListener> actionListener, TransportReplicationAction.PrimaryShardReference primaryShardReference) { - return new NoopReplicationOperation(request, actionListener) { + return new NoopReplicationOperation(request, actionListener, primaryTerm) { @Override public void execute() throws Exception { assertPhase(task, "primary"); @@ -661,7 +661,7 @@ public class TransportReplicationActionTests extends ESTestCase { Request request, ActionListener> actionListener, TransportReplicationAction.PrimaryShardReference primaryShardReference) { - return new 
NoopReplicationOperation(request, actionListener) { + return new NoopReplicationOperation(request, actionListener, primaryTerm) { @Override public void execute() throws Exception { assertPhase(task, "primary"); @@ -710,7 +710,8 @@ public class TransportReplicationActionTests extends ESTestCase { ClusterState state = stateWithActivePrimary(index, true, 1 + randomInt(3), randomInt(2)); logger.info("using state: {}", state); setState(clusterService, state); - ReplicationOperation.Replicas proxy = action.newReplicasProxy(state.metaData().index(index).primaryTerm(0)); + final long primaryTerm = state.metaData().index(index).primaryTerm(0); + ReplicationOperation.Replicas proxy = action.newReplicasProxy(); // check that at unknown node fails PlainActionFuture listener = new PlainActionFuture<>(); @@ -720,6 +721,7 @@ public class TransportReplicationActionTests extends ESTestCase { TestShardRouting.newShardRouting(shardId, "NOT THERE", routingState == ShardRoutingState.RELOCATING ? state.nodes().iterator().next().getId() : null, false, routingState), new Request(NO_SHARD_ID), + primaryTerm, randomNonNegativeLong(), randomNonNegativeLong(), listener); @@ -730,7 +732,7 @@ public class TransportReplicationActionTests extends ESTestCase { final ShardRouting replica = randomFrom(shardRoutings.replicaShards().stream() .filter(ShardRouting::assignedToNode).collect(Collectors.toList())); listener = new PlainActionFuture<>(); - proxy.performOn(replica, new Request(NO_SHARD_ID), randomNonNegativeLong(), randomNonNegativeLong(), listener); + proxy.performOn(replica, new Request(NO_SHARD_ID), primaryTerm, randomNonNegativeLong(), randomNonNegativeLong(), listener); assertFalse(listener.isDone()); CapturingTransport.CapturedRequest[] captures = transport.getCapturedRequestsAndClear(); @@ -753,7 +755,7 @@ public class TransportReplicationActionTests extends ESTestCase { AtomicReference failure = new AtomicReference<>(); AtomicBoolean success = new AtomicBoolean(); - proxy.failShardIfNeeded(replica, "test", new ElasticsearchException("simulated"), + proxy.failShardIfNeeded(replica, primaryTerm, "test", new ElasticsearchException("simulated"), ActionListener.wrap(r -> success.set(true), failure::set)); CapturingTransport.CapturedRequest[] shardFailedRequests = transport.getCapturedRequestsAndClear(); // A replication action doesn't not fail the request @@ -836,7 +838,7 @@ public class TransportReplicationActionTests extends ESTestCase { if (throwExceptionOnCreation) { throw new ElasticsearchException("simulated exception, during createReplicatedOperation"); } - return new NoopReplicationOperation(request, actionListener) { + return new NoopReplicationOperation(request, actionListener, primaryTerm) { @Override public void execute() throws Exception { assertIndexShardCounter(1); @@ -1322,8 +1324,9 @@ public class TransportReplicationActionTests extends ESTestCase { class NoopReplicationOperation extends ReplicationOperation> { - NoopReplicationOperation(Request request, ActionListener> listener) { - super(request, null, listener, null, TransportReplicationActionTests.this.logger, "noop"); + NoopReplicationOperation(Request request, ActionListener> listener, + long primaryTerm) { + super(request, null, listener, null, TransportReplicationActionTests.this.logger, "noop", primaryTerm); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 
98c4f215fca..1a7e5a73e75 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -278,7 +278,8 @@ public class TransportWriteActionTests extends ESTestCase { ClusterState state = ClusterStateCreationUtils.stateWithActivePrimary(index, true, 1 + randomInt(3), randomInt(2)); logger.info("using state: {}", state); ClusterServiceUtils.setState(clusterService, state); - ReplicationOperation.Replicas proxy = action.newReplicasProxy(state.metaData().index(index).primaryTerm(0)); + final long primaryTerm = state.metaData().index(index).primaryTerm(0); + ReplicationOperation.Replicas proxy = action.newReplicasProxy(); // check that at unknown node fails PlainActionFuture listener = new PlainActionFuture<>(); @@ -288,7 +289,7 @@ public class TransportWriteActionTests extends ESTestCase { TestShardRouting.newShardRouting(shardId, "NOT THERE", routingState == ShardRoutingState.RELOCATING ? state.nodes().iterator().next().getId() : null, false, routingState), new TestRequest(), - randomNonNegativeLong(), randomNonNegativeLong(), listener); + primaryTerm, randomNonNegativeLong(), randomNonNegativeLong(), listener); assertTrue(listener.isDone()); assertListenerThrows("non existent node should throw a NoNodeAvailableException", listener, NoNodeAvailableException.class); @@ -296,7 +297,7 @@ public class TransportWriteActionTests extends ESTestCase { final ShardRouting replica = randomFrom(shardRoutings.replicaShards().stream() .filter(ShardRouting::assignedToNode).collect(Collectors.toList())); listener = new PlainActionFuture<>(); - proxy.performOn(replica, new TestRequest(), randomNonNegativeLong(), randomNonNegativeLong(), listener); + proxy.performOn(replica, new TestRequest(), primaryTerm, randomNonNegativeLong(), randomNonNegativeLong(), listener); assertFalse(listener.isDone()); CapturingTransport.CapturedRequest[] captures = transport.getCapturedRequestsAndClear(); @@ -319,7 +320,7 @@ public class TransportWriteActionTests extends ESTestCase { AtomicReference failure = new AtomicReference<>(); AtomicBoolean success = new AtomicBoolean(); - proxy.failShardIfNeeded(replica, "test", new ElasticsearchException("simulated"), + proxy.failShardIfNeeded(replica, primaryTerm, "test", new ElasticsearchException("simulated"), ActionListener.wrap(r -> success.set(true), failure::set)); CapturingTransport.CapturedRequest[] shardFailedRequests = transport.getCapturedRequestsAndClear(); // A write replication action proxy should fail the shard diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index c78079169ed..7326b2e8b57 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -608,8 +608,8 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase public void execute() { try { new ReplicationOperation<>(request, new PrimaryRef(), - ActionListener.wrap(result -> result.respond(listener), listener::onFailure), new ReplicasRef(), logger, opType - ).execute(); + ActionListener.wrap(result -> result.respond(listener), listener::onFailure), new ReplicasRef(), logger, opType, + primaryTerm).execute(); } 
catch (Exception e) { listener.onFailure(e); } @@ -678,6 +678,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase public void performOn( final ShardRouting replicaRouting, final ReplicaRequest request, + final long primaryTerm, final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes, final ActionListener listener) { @@ -708,12 +709,14 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } @Override - public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener) { + public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception, + ActionListener listener) { throw new UnsupportedOperationException("failing shard " + replica + " isn't supported. failure: " + message, exception); } @Override - public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener listener) { + public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, long primaryTerm, + ActionListener listener) { throw new UnsupportedOperationException("can't mark " + shardId + ", aid [" + allocationId + "] as stale"); } } From 5cdd87deb7490cc6598c6f1f0761c2ee1e889b99 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 11 Apr 2019 13:58:47 -0700 Subject: [PATCH 55/60] Remove settings members from Node (#40811) This commit removes the settings member variable from Node. This member made it confusing which settings should actually be looked at. Now all settings are accessed through the final environment. --- .../java/org/elasticsearch/node/Node.java | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 356219b046e..8484be006ec 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -238,7 +238,6 @@ public class Node implements Closeable { */ private final Logger logger; private final Injector injector; - private final Settings settings; private final Environment environment; private final NodeEnvironment nodeEnvironment; private final PluginsService pluginsService; @@ -304,12 +303,12 @@ public class Node implements Closeable { this.pluginsService = new PluginsService(tmpSettings, environment.configFile(), environment.modulesFile(), environment.pluginsFile(), classpathPlugins); - this.settings = pluginsService.updatedSettings(); + final Settings settings = pluginsService.updatedSettings(); localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId()); // create the environment based on the finalized (processed) view of the settings // this is just to makes sure that people get the same settings, no matter where they ask them from - this.environment = new Environment(this.settings, environment.configFile()); + this.environment = new Environment(settings, environment.configFile()); Environment.assertEquivalent(environment, this.environment); final List> executorBuilders = pluginsService.getExecutorBuilders(settings); @@ -339,7 +338,7 @@ public class Node implements Closeable { .collect(Collectors.toSet()); final SettingsModule settingsModule = - new SettingsModule(this.settings, additionalSettings, additionalSettingsFilter, settingsUpgraders); + new SettingsModule(settings, additionalSettings, additionalSettingsFilter, settingsUpgraders); 
scriptModule.registerClusterSettingsListeners(settingsModule.getClusterSettings()); resourcesToClose.add(resourceWatcherService); final NetworkService networkService = new NetworkService( @@ -481,7 +480,7 @@ public class Node implements Closeable { modules.add(new RepositoriesModule(this.environment, pluginsService.filterPlugins(RepositoryPlugin.class), transportService, clusterService, threadPool, xContentRegistry)); - final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry, + final DiscoveryModule discoveryModule = new DiscoveryModule(settings, threadPool, transportService, namedWriteableRegistry, networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(), clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class), clusterModule.getAllocationService(), environment.configFile(), gatewayMetaState); @@ -600,7 +599,7 @@ public class Node implements Closeable { * The settings that are used by this node. Contains original settings as well as additional settings provided by plugins. */ public Settings settings() { - return this.settings; + return this.environment.settings(); } /** @@ -666,7 +665,7 @@ public class Node implements Closeable { final MetaData onDiskMetadata; // we load the global state here (the persistent part of the cluster state stored on disk) to // pass it to the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state. - if (DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings)) { + if (DiscoveryNode.isMasterNode(settings()) || DiscoveryNode.isDataNode(settings())) { onDiskMetadata = injector.getInstance(GatewayMetaState.class).getMetaData(); } else { onDiskMetadata = MetaData.EMPTY_META_DATA; @@ -684,7 +683,7 @@ public class Node implements Closeable { : "clusterService has a different local node than the factory provided"; transportService.acceptIncomingRequests(); discovery.startInitialJoin(); - final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings); + final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings()); configureNodeAndClusterIdStateListener(clusterService); if (initialStateTimeout.millis() > 0) { @@ -723,7 +722,7 @@ public class Node implements Closeable { injector.getInstance(HttpServerTransport.class).start(); - if (WRITE_PORTS_FILE_SETTING.get(settings)) { + if (WRITE_PORTS_FILE_SETTING.get(settings())) { TransportService transport = injector.getInstance(TransportService.class); writePortsFile("transport", transport.boundAddress()); HttpServerTransport http = injector.getInstance(HttpServerTransport.class); @@ -961,7 +960,7 @@ public class Node implements Closeable { private List getCustomNameResolvers(List discoveryPlugins) { List customNameResolvers = new ArrayList<>(); for (DiscoveryPlugin discoveryPlugin : discoveryPlugins) { - NetworkService.CustomNameResolver customNameResolver = discoveryPlugin.getCustomNameResolver(settings); + NetworkService.CustomNameResolver customNameResolver = discoveryPlugin.getCustomNameResolver(settings()); if (customNameResolver != null) { customNameResolvers.add(customNameResolver); } From 9c36ab4ab457115110745a7df1e2b458b6b976c5 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 11 Apr 2019 15:53:02 -0400 Subject: [PATCH 56/60] Adjust bwc version for flush parameter validation Relates to #40213 --- 
.../resources/rest-api-spec/test/indices.flush/10_basic.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml index 6ba46c795da..30a264ca64b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml @@ -56,8 +56,8 @@ --- "Flush parameters validation": - skip: - version: " - 7.9.99" - reason: flush parameters validation is introduced in 8.0 + version: " - 7.0.99" + reason: flush parameters validation is introduced in 7.1.0 - do: indices.create: index: test From 0f496842fdbeb9a8d11aa858da3802e57430a52c Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 11 Apr 2019 18:41:21 -0400 Subject: [PATCH 57/60] Fix msu assertion in restore shard history test Since #40249, we always reinitialize max_seq_no_of_updates to max_seq_no when a promoting primary restores history regardless of whether it did rollback previously or not. Closes #40929 --- .../org/elasticsearch/index/shard/IndexShardTests.java | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index bf2499c6d1e..042b61c77c2 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1090,7 +1090,6 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(replicaShard, primaryShard); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40929") public void testRestoreLocalHistoryFromTranslogOnPromotion() throws IOException, InterruptedException { final IndexShard indexShard = newStartedShard(false); final int operations = 1024 - scaledRandomIntBetween(0, 1024); @@ -1145,13 +1144,8 @@ public class IndexShardTests extends IndexShardTestCase { assertThat(indexShard.getLocalCheckpoint(), equalTo(maxSeqNo)); assertThat(indexShard.seqNoStats().getMaxSeqNo(), equalTo(maxSeqNo)); assertThat(getShardDocUIDs(indexShard), equalTo(docsBeforeRollback)); - if (shouldRollback) { - // we conservatively roll MSU forward to maxSeqNo during restoreLocalHistory, ideally it should become just - // currentMaxSeqNoOfUpdates - assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(maxSeqNo)); - } else { - assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Math.max(currentMaxSeqNoOfUpdates, maxSeqNoOfUpdatesOrDeletes))); - } + // we conservatively roll MSU forward to maxSeqNo during restoreLocalHistory, ideally it should become just currentMaxSeqNoOfUpdates + assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(maxSeqNo)); closeShard(indexShard, false); } From 79c7a57737c7e8201512bab4be8efb4d7a97841c Mon Sep 17 00:00:00 2001 From: Antonio Matarrese Date: Thu, 11 Apr 2019 17:38:25 -0500 Subject: [PATCH 58/60] Use the breadth first collection mode for significant terms aggs. (#29042) This helps avoid memory issues when computing deep sub-aggregations. Because it should be rare to use sub-aggregations with significant terms, we opted to always choose breadth first as opposed to exposing a `collect_mode` option. Closes #28652. 
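For background, `breadth_first` collection means the top-level buckets are collected cheaply first, the top-N surviving buckets are selected, and only then are the buffered documents replayed into the sub-aggregations. A minimal, self-contained sketch of that pattern follows; every name in it is illustrative and none of it is the actual Elasticsearch aggregator code, which instead calls runDeferredCollections(survivingBucketOrds) as shown in the diff below.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.IntConsumer;

// Sketch of breadth-first collection: buffer matching docs per bucket,
// choose the surviving top buckets, then replay only those docs into the
// (potentially deep and memory-hungry) sub-aggregation.
class BreadthFirstCollectionSketch {
    private final Map<String, List<Integer>> bufferedDocs = new HashMap<>();

    void collect(String bucketKey, int docId) {
        // cheap top-level collection: remember the doc, defer all sub-agg work
        bufferedDocs.computeIfAbsent(bucketKey, k -> new ArrayList<>()).add(docId);
    }

    void buildAggregation(int topN, IntConsumer deferredSubAggregator) {
        bufferedDocs.entrySet().stream()
            .sorted(Comparator.comparingInt(
                (Map.Entry<String, List<Integer>> e) -> e.getValue().size()).reversed())
            .limit(topN)
            // only the surviving buckets pay the sub-aggregation cost
            .forEach(e -> e.getValue().forEach(deferredSubAggregator::accept));
    }
}

Depth-first would instead run the sub-aggregation inside collect(), paying its cost for every candidate bucket, which is what caused the memory issues with deep sub-aggregations.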
--- .../significantterms-aggregation.asciidoc | 6 ++++ .../bucket/terms-aggregation.asciidoc | 1 + ...balOrdinalsSignificantTermsAggregator.java | 11 +++++-- .../SignificantLongTermsAggregator.java | 14 ++++++-- .../SignificantStringTermsAggregator.java | 14 ++++++-- .../SignificantTermsSignificanceScoreIT.java | 33 +++++++++++++++++++ 6 files changed, 71 insertions(+), 8 deletions(-) diff --git a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc index e29fbac0c56..5766cb1a73e 100644 --- a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc @@ -542,6 +542,12 @@ It is possible (although rarely required) to filter the values for which buckets `exclude` parameters which are based on a regular expression string or arrays of exact terms. This functionality mirrors the features described in the <> documentation. +==== Collect mode + +To avoid memory issues, the `significant_terms` aggregation always computes child aggregations in `breadth_first` mode. +A description of the different collection modes can be found in the +<> documentation. + ==== Execution hint There are different mechanisms by which terms aggregations can be executed: diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index 8db3c6b59ee..34d50992e80 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -775,6 +775,7 @@ fields, then use `copy_to` in your mapping to create a new dedicated field at index time which contains the values from both fields. You can aggregate on this single field, which will benefit from the global ordinals optimization. 
+[[search-aggregations-bucket-terms-aggregation-collect]] ==== Collect mode Deferring calculation of child aggregations diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java index 25f83caa3eb..d641a2773e6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java @@ -65,7 +65,7 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri List pipelineAggregators, Map metaData) throws IOException { super(name, factories, valuesSource, null, format, bucketCountThresholds, includeExclude, context, parent, - forceRemapGlobalOrds, SubAggCollectionMode.DEPTH_FIRST, false, pipelineAggregators, metaData); + forceRemapGlobalOrds, SubAggCollectionMode.BREADTH_FIRST, false, pipelineAggregators, metaData); this.significanceHeuristic = significanceHeuristic; this.termsAggFactory = termsAggFactory; this.numCollectedDocs = 0; @@ -146,12 +146,19 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri } final SignificantStringTerms.Bucket[] list = new SignificantStringTerms.Bucket[ordered.size()]; + final long[] survivingBucketOrds = new long[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; i--) { final SignificantStringTerms.Bucket bucket = ordered.pop(); + survivingBucketOrds[i] = bucket.bucketOrd; + list[i] = bucket; + } + + runDeferredCollections(survivingBucketOrds); + + for (SignificantStringTerms.Bucket bucket : list) { // the terms are owned by the BytesRefHash, we need to pull a copy since the BytesRef hash data may be recycled at some point bucket.termBytes = BytesRef.deepCopyOf(bucket.termBytes); bucket.aggregations = bucketAggregations(bucket.bucketOrd); - list[i] = bucket; } return new SignificantStringTerms(name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java index 235b3f41c08..2fcba9f09bf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java @@ -50,7 +50,7 @@ public class SignificantLongTermsAggregator extends LongTermsAggregator { List pipelineAggregators, Map metaData) throws IOException { super(name, factories, valuesSource, format, null, bucketCountThresholds, context, parent, - SubAggCollectionMode.DEPTH_FIRST, false, includeExclude, pipelineAggregators, metaData); + SubAggCollectionMode.BREADTH_FIRST, false, includeExclude, pipelineAggregators, metaData); this.significanceHeuristic = significanceHeuristic; this.termsAggFactory = termsAggFactory; } @@ -106,12 +106,20 @@ public class SignificantLongTermsAggregator extends LongTermsAggregator { } } - final SignificantLongTerms.Bucket[] list = new SignificantLongTerms.Bucket[ordered.size()]; + SignificantLongTerms.Bucket[] list = new SignificantLongTerms.Bucket[ordered.size()]; + final long[] 
survivingBucketOrds = new long[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; i--) { final SignificantLongTerms.Bucket bucket = ordered.pop(); - bucket.aggregations = bucketAggregations(bucket.bucketOrd); + survivingBucketOrds[i] = bucket.bucketOrd; list[i] = bucket; } + + runDeferredCollections(survivingBucketOrds); + + for (SignificantLongTerms.Bucket bucket : list) { + bucket.aggregations = bucketAggregations(bucket.bucketOrd); + } + return new SignificantLongTerms(name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format, subsetSize, supersetSize, significanceHeuristic, Arrays.asList(list)); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java index 56258758907..91ade2e42f7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java @@ -57,7 +57,7 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator { List pipelineAggregators, Map metaData) throws IOException { super(name, factories, valuesSource, null, format, bucketCountThresholds, includeExclude, aggregationContext, parent, - SubAggCollectionMode.DEPTH_FIRST, false, pipelineAggregators, metaData); + SubAggCollectionMode.BREADTH_FIRST, false, pipelineAggregators, metaData); this.significanceHeuristic = significanceHeuristic; this.termsAggFactory = termsAggFactory; } @@ -113,12 +113,20 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator { } final SignificantStringTerms.Bucket[] list = new SignificantStringTerms.Bucket[ordered.size()]; + final long[] survivingBucketOrds = new long[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; i--) { final SignificantStringTerms.Bucket bucket = ordered.pop(); - // the terms are owned by the BytesRefHash, we need to pull a copy since the BytesRef hash data may be recycled at some point + survivingBucketOrds[i] = bucket.bucketOrd; + list[i] = bucket; + } + + runDeferredCollections(survivingBucketOrds); + + for (SignificantStringTerms.Bucket bucket : list) { + // the terms are owned by the BytesRefHash, we need to pull a copy since the BytesRef hash data may be + // recycled at some point bucket.termBytes = BytesRef.deepCopyOf(bucket.termBytes); bucket.aggregations = bucketAggregations(bucket.bucketOrd); - list[i] = bucket; } return new SignificantStringTerms( name, bucketCountThresholds.getRequiredSize(), diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 658f8308571..d21519fa967 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilder; 
import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.plugins.Plugin; @@ -38,6 +39,7 @@ import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; @@ -543,6 +545,37 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { } } + /** + * A simple test that adds a sub-aggregation to a significant terms aggregation, + * to help check that sub-aggregation collection is handled correctly. + */ + public void testSubAggregations() throws Exception { + indexEqualTestData(); + + QueryBuilder query = QueryBuilders.termsQuery(TEXT_FIELD, "a", "b"); + AggregationBuilder subAgg = terms("class").field(CLASS_FIELD); + AggregationBuilder agg = significantTerms("significant_terms") + .field(TEXT_FIELD) + .executionHint(randomExecutionHint()) + .significanceHeuristic(new ChiSquare(true, true)) + .minDocCount(1).shardSize(1000).size(1000) + .subAggregation(subAgg); + + SearchResponse response = client().prepareSearch("test") + .setQuery(query) + .addAggregation(agg) + .get(); + assertSearchResponse(response); + + SignificantTerms sigTerms = response.getAggregations().get("significant_terms"); + assertThat(sigTerms.getBuckets().size(), equalTo(2)); + + for (SignificantTerms.Bucket bucket : sigTerms) { + StringTerms terms = bucket.getAggregations().get("class"); + assertThat(terms.getBuckets().size(), equalTo(2)); + } + } + private void indexEqualTestData() throws ExecutionException, InterruptedException { assertAcked(prepareCreate("test") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) From e9999dfa1d4adcb188a109c3db52cadacc254f63 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 11 Apr 2019 22:18:31 -0400 Subject: [PATCH 59/60] Init global checkpoint after copy commit in peer recovery (#40823) Today a new replica of a closed index does not have a safe commit invariant when its engine is opened because we won't initialize the global checkpoint on a recovering replica until the finalize step. With this change, we can achieve that property by creating a new translog with the global checkpoint from the primary at the end of phase 1. 
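The invariant this change establishes can be sketched in a few lines; the names below are hypothetical stand-ins for RecoverySourceHandler.phase1 and RecoveryTarget.cleanFiles in the diff that follows, and the commit-data key is assumed to match SequenceNumbers.MAX_SEQ_NO.

import java.util.Map;

// Sketch only: after phase 1 copies the primary's safe commit, the target
// creates an empty translog seeded with the primary's global checkpoint,
// so the commit the engine opens on is a safe commit (max_seq_no <= checkpoint).
final class Phase1HandOffSketch {
    static void cleanFiles(long globalCheckpointFromPrimary, Map<String, String> commitUserData) {
        final long maxSeqNo = Long.parseLong(commitUserData.get("max_seq_no"));
        if (globalCheckpointFromPrimary < maxSeqNo) {
            // would break the safe-commit invariant the engine now asserts
            throw new IllegalStateException("commit max_seq_no [" + maxSeqNo
                + "] is above the global checkpoint [" + globalCheckpointFromPrimary + "]");
        }
        createEmptyTranslog(globalCheckpointFromPrimary); // stand-in for Translog.createEmptyTranslog
    }

    private static void createEmptyTranslog(long initialGlobalCheckpoint) {
        // stand-in; the real method also takes the translog path, shard id and primary term
    }
}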
--- .../index/engine/ReadOnlyEngine.java | 9 ++- .../recovery/PeerRecoveryTargetService.java | 6 +- .../recovery/RecoveryCleanFilesRequest.java | 74 +++++++++++-------- .../recovery/RecoverySourceHandler.java | 6 +- .../indices/recovery/RecoveryTarget.java | 12 ++- .../recovery/RecoveryTargetHandler.java | 6 +- .../recovery/RemoteRecoveryTargetHandler.java | 4 +- .../IndexLevelReplicationTests.java | 5 +- .../RecoveryDuringReplicationTests.java | 4 +- .../PeerRecoveryTargetServiceTests.java | 2 +- .../recovery/RecoverySourceHandlerTests.java | 6 +- .../indices/recovery/RecoveryTests.java | 16 +++- .../indices/recovery/AsyncRecoveryTarget.java | 4 +- 13 files changed, 94 insertions(+), 60 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index cae83927fdb..fa09b3529d7 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -103,15 +103,16 @@ public class ReadOnlyEngine extends Engine { this.translogStats = translogStats == null ? new TranslogStats(0, 0, 0, 0, 0) : translogStats; if (seqNoStats == null) { seqNoStats = buildSeqNoStats(lastCommittedSegmentInfos); - // During a peer-recovery the global checkpoint is not known and up to date when the engine - // is created, so we only check the max seq no / global checkpoint coherency when the global + // Before 8.0 the global checkpoint is not known and up to date when the engine is created after + // peer recovery, so we only check the max seq no / global checkpoint coherency when the global // checkpoint is different from the unassigned sequence number value. // In addition to that we only execute the check if the index the engine belongs to has been // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction // that guarantee that all operations have been flushed to Lucene. 
final long globalCheckpoint = engineConfig.getGlobalCheckpointSupplier().getAsLong(); - if (globalCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO - && engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_7_0)) { + final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated(); + if (indexVersionCreated.onOrAfter(Version.V_7_1_0) || + (globalCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && indexVersionCreated.onOrAfter(Version.V_6_7_0))) { if (seqNoStats.getMaxSeqNo() != globalCheckpoint) { assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), globalCheckpoint); throw new IllegalStateException("Maximum sequence number [" + seqNoStats.getMaxSeqNo() diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 9bb29a14cad..e3125ce5be9 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -115,8 +115,8 @@ public class PeerRecoveryTargetService implements IndexEventListener { FilesInfoRequestHandler()); transportService.registerRequestHandler(Actions.FILE_CHUNK, RecoveryFileChunkRequest::new, ThreadPool.Names.GENERIC, new FileChunkTransportRequestHandler()); - transportService.registerRequestHandler(Actions.CLEAN_FILES, RecoveryCleanFilesRequest::new, ThreadPool.Names.GENERIC, new - CleanFilesRequestHandler()); + transportService.registerRequestHandler(Actions.CLEAN_FILES, ThreadPool.Names.GENERIC, + RecoveryCleanFilesRequest::new, new CleanFilesRequestHandler()); transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, ThreadPool.Names.GENERIC, RecoveryPrepareForTranslogOperationsRequest::new, new PrepareForTranslogOperationsRequestHandler()); transportService.registerRequestHandler(Actions.TRANSLOG_OPS, RecoveryTranslogOperationsRequest::new, ThreadPool.Names.GENERIC, @@ -540,7 +540,7 @@ public class PeerRecoveryTargetService implements IndexEventListener { public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel, Task task) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { - recoveryRef.target().cleanFiles(request.totalTranslogOps(), request.sourceMetaSnapshot()); + recoveryRef.target().cleanFiles(request.totalTranslogOps(), request.getGlobalCheckpoint(), request.sourceMetaSnapshot()); channel.sendResponse(TransportResponse.Empty.INSTANCE); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java index 4216ea049b4..d23f89a769c 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java @@ -19,8 +19,10 @@ package org.elasticsearch.indices.recovery; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.transport.TransportRequest; @@ -29,20 +31,48 @@ import java.io.IOException; public class RecoveryCleanFilesRequest 
extends TransportRequest { - private long recoveryId; - private ShardId shardId; + private final long recoveryId; + private final ShardId shardId; + private final Store.MetadataSnapshot snapshotFiles; + private final int totalTranslogOps; + private final long globalCheckpoint; - private Store.MetadataSnapshot snapshotFiles; - private int totalTranslogOps = RecoveryState.Translog.UNKNOWN; - - public RecoveryCleanFilesRequest() { - } - - RecoveryCleanFilesRequest(long recoveryId, ShardId shardId, Store.MetadataSnapshot snapshotFiles, int totalTranslogOps) { + RecoveryCleanFilesRequest(long recoveryId, ShardId shardId, Store.MetadataSnapshot snapshotFiles, + int totalTranslogOps, long globalCheckpoint) { this.recoveryId = recoveryId; this.shardId = shardId; this.snapshotFiles = snapshotFiles; this.totalTranslogOps = totalTranslogOps; + this.globalCheckpoint = globalCheckpoint; + } + + RecoveryCleanFilesRequest(StreamInput in) throws IOException { + super(in); + recoveryId = in.readLong(); + shardId = ShardId.readShardId(in); + snapshotFiles = new Store.MetadataSnapshot(in); + totalTranslogOps = in.readVInt(); + if (in.getVersion().onOrAfter(Version.V_7_1_0)) { + globalCheckpoint = in.readZLong(); + } else { + globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeLong(recoveryId); + shardId.writeTo(out); + snapshotFiles.writeTo(out); + out.writeVInt(totalTranslogOps); + if (out.getVersion().onOrAfter(Version.V_7_1_0)) { + out.writeZLong(globalCheckpoint); + } + } + + public Store.MetadataSnapshot sourceMetaSnapshot() { + return snapshotFiles; } public long recoveryId() { @@ -53,29 +83,11 @@ public class RecoveryCleanFilesRequest extends TransportRequest { return shardId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); - snapshotFiles = new Store.MetadataSnapshot(in); - totalTranslogOps = in.readVInt(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeLong(recoveryId); - shardId.writeTo(out); - snapshotFiles.writeTo(out); - out.writeVInt(totalTranslogOps); - } - - public Store.MetadataSnapshot sourceMetaSnapshot() { - return snapshotFiles; - } - public int totalTranslogOps() { return totalTranslogOps; } + + public long getGlobalCheckpoint() { + return globalCheckpoint; + } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 971f705b395..cbd0d1dc2cd 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -177,7 +177,7 @@ public class RecoverySourceHandler { startingSeqNo = 0; try { final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo); - sendFileResult = phase1(phase1Snapshot.getIndexCommit(), () -> estimateNumOps); + sendFileResult = phase1(phase1Snapshot.getIndexCommit(), shard.getGlobalCheckpoint(), () -> estimateNumOps); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e); } finally { @@ -332,7 +332,7 @@ public class RecoverySourceHandler { * segments that are missing. 
Only segments that have the same size and * checksum can be reused */ - public SendFileResult phase1(final IndexCommit snapshot, final Supplier translogOps) { + public SendFileResult phase1(final IndexCommit snapshot, final long globalCheckpoint, final Supplier translogOps) { cancellableThreads.checkForCancel(); // Total size of segment files that are recovered long totalSize = 0; @@ -422,7 +422,7 @@ public class RecoverySourceHandler { // are deleted try { cancellableThreads.executeIO(() -> - recoveryTarget.cleanFiles(translogOps.get(), recoverySourceMetadata)); + recoveryTarget.cleanFiles(translogOps.get(), globalCheckpoint, recoverySourceMetadata)); } catch (RemoteTransportException | IOException targetException) { final IOException corruptIndexException; // we realized that after the index was copied and we wanted to finalize the recovery diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 388ca35e5b0..fbc0dbde510 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -288,6 +288,9 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget ActionListener.completeWith(listener, () -> { state().getTranslog().totalOperations(totalTranslogOps); indexShard().openEngineAndSkipTranslogRecovery(); + assert indexShard.getGlobalCheckpoint() >= indexShard.seqNoStats().getMaxSeqNo() || + indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_1_0) + : "global checkpoint is not initialized [" + indexShard.seqNoStats() + "]"; return null; }); } @@ -382,7 +385,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget } @Override - public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException { + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { state().getTranslog().totalOperations(totalTranslogOps); // first, we go and move files that were created with the recovery id suffix to // the actual names, its ok if we have a corrupted index here, since we have replicas @@ -395,10 +398,11 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget if (indexShard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1)) { store.ensureIndexHasHistoryUUID(); } - // TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2 + assert globalCheckpoint >= Long.parseLong(sourceMetaData.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO)) + || indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_1_0) : + "invalid global checkpoint[" + globalCheckpoint + "] source_meta_data [" + sourceMetaData.getCommitUserData() + "]"; final String translogUUID = Translog.createEmptyTranslog( - indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, - indexShard.getPendingPrimaryTerm()); + indexShard.shardPath().resolveTranslog(), globalCheckpoint, shardId, indexShard.getPendingPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); if (indexShard.getRetentionLeases().leases().isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java index 
697a168a220..a16fd4b6ab3 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -88,10 +88,12 @@ public interface RecoveryTargetHandler { /** * After all source files has been sent over, this command is sent to the target so it can clean any local * files that are not part of the source store + * * @param totalTranslogOps an update number of translog operations that will be replayed later on - * @param sourceMetaData meta data of the source store + * @param globalCheckpoint the global checkpoint on the primary + * @param sourceMetaData meta data of the source store */ - void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException; + void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException; /** writes a partial file chunk to the target store */ void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content, diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index 357f82a744c..5deb6f6ff9d 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -139,9 +139,9 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { } @Override - public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException { + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.CLEAN_FILES, - new RecoveryCleanFilesRequest(recoveryId, shardId, sourceMetaData, totalTranslogOps), + new RecoveryCleanFilesRequest(recoveryId, shardId, sourceMetaData, totalTranslogOps, globalCheckpoint), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 657bbac734b..e25557eaabc 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -121,8 +121,9 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase Future future = shards.asyncRecoverReplica(replica, (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener) { @Override - public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException { - super.cleanFiles(totalTranslogOps, sourceMetaData); + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, + Store.MetadataSnapshot sourceMetaData) throws IOException { + super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData); latch.countDown(); try { latch.await(); diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 
d8a7827a8cb..7de35358423 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -853,9 +853,9 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC } @Override - public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException { + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { blockIfNeeded(RecoveryState.Stage.INDEX); - super.cleanFiles(totalTranslogOps, sourceMetaData); + super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData); } @Override diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index a936816b968..bb4c25e6186 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -189,7 +189,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase { for (Thread sender : senders) { sender.join(); } - recoveryTarget.cleanFiles(0, sourceSnapshot); + recoveryTarget.cleanFiles(0, Long.parseLong(sourceSnapshot.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO)), sourceSnapshot); recoveryTarget.decRef(); Store.MetadataSnapshot targetSnapshot = targetShard.snapshotStoreMetadata(); Store.RecoveryDiff diff = sourceSnapshot.recoveryDiff(targetSnapshot); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 38c6179224a..4ce402163d2 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -473,9 +473,9 @@ public class RecoverySourceHandlerTests extends ESTestCase { between(1, 8)) { @Override - public SendFileResult phase1(final IndexCommit snapshot, final Supplier translogOps) { + public SendFileResult phase1(final IndexCommit snapshot, final long globalCheckpoint, final Supplier translogOps) { phase1Called.set(true); - return super.phase1(snapshot, translogOps); + return super.phase1(snapshot, globalCheckpoint, translogOps); } @Override @@ -715,7 +715,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { } @Override - public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) { + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) { } @Override diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 2761333ef56..1dc2ba058b7 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoMergePolicy; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; 
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -42,9 +43,11 @@ import org.elasticsearch.index.replication.ESIndexLevelReplicationTestCase; import org.elasticsearch.index.replication.RecoveryDuringReplicationTests; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.SnapshotMatchers; import org.elasticsearch.index.translog.Translog; +import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -323,7 +326,18 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { } IndexShard replicaShard = newShard(primaryShard.shardId(), false); updateMappings(replicaShard, primaryShard.indexSettings().getIndexMetaData()); - recoverReplica(replicaShard, primaryShard, true); + recoverReplica(replicaShard, primaryShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener) { + @Override + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { + super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps, listener); + assertThat(replicaShard.getGlobalCheckpoint(), equalTo(primaryShard.getGlobalCheckpoint())); + } + @Override + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { + assertThat(globalCheckpoint, equalTo(primaryShard.getGlobalCheckpoint())); + super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData); + } + }, true, true); List commits = DirectoryReader.listCommits(replicaShard.store().directory()); long maxSeqNo = Long.parseLong(commits.get(0).getUserData().get(SequenceNumbers.MAX_SEQ_NO)); assertThat(maxSeqNo, lessThanOrEqualTo(globalCheckpoint)); diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java index ec34d7e27a3..0622bce2013 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java @@ -75,8 +75,8 @@ public class AsyncRecoveryTarget implements RecoveryTargetHandler { } @Override - public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException { - target.cleanFiles(totalTranslogOps, sourceMetaData); + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { + target.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData); } @Override From 3df6798c4cf8ecee24e6308f2d13754b62562970 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Fri, 12 Apr 2019 07:29:55 +0200 Subject: [PATCH 60/60] Rollup/DataFrame: disallow partial results (#41114) Disallow partial results in rollup and data frame. After this change the client throws an error directly, replacing the runtime exception previously thrown, which allows better error handling in implementations.
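The client-visible effect is simplest to show on the request itself: every search the indexer issues now explicitly rejects partial results. A minimal sketch (the wrapper below is illustrative; the real call site is the new nextSearch method in the diff that follows):

import org.elasticsearch.action.search.SearchRequest;

// With partial results disallowed, a shard-level failure fails the whole
// search and reaches the listener's onFailure (for example as a
// SearchPhaseExecutionException), instead of arriving as a SearchResponse
// that silently misses some shards.
final class NoPartialResultsSketch {
    static SearchRequest harden(SearchRequest searchRequest) {
        return searchRequest.allowPartialSearchResults(false);
    }
}

Accordingly, the updated RollupIndexerStateTests asserts on the thrown "Partial shards failure" message rather than inspecting shard failures embedded in a response.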
---
 .../core/indexing/AsyncTwoPhaseIndexer.java   | 24 ++++++++++---------
 .../indexing/AsyncTwoPhaseIndexerTests.java   |  4 ++--
 .../rollup/job/RollupIndexerStateTests.java   | 16 +++++++++----
 3 files changed, 26 insertions(+), 18 deletions(-)

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
index e859e0db754..636a3978443 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
@@ -14,7 +14,6 @@ import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;

-import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.Executor;
 import java.util.concurrent.atomic.AtomicReference;
@@ -150,8 +149,7 @@ public abstract class AsyncTwoPhaseIndexer<JobPosition, JobStats extends Indexe
             executor.execute(() -> {
                 onStart(now, ActionListener.wrap(r -> {
-                    stats.markStartSearch();
-                    doNextSearch(buildSearchRequest(), ActionListener.wrap(this::onSearchResponse, this::finishWithSearchFailure));
+                    nextSearch(ActionListener.wrap(this::onSearchResponse, this::finishWithSearchFailure));
                 }, e -> {
                     finishAndSetState();
                     onFailure(e);
@@ -305,10 +303,9 @@ public abstract class AsyncTwoPhaseIndexer<JobPosition, JobStats extends Indexe
-            if (searchResponse.getShardFailures().length != 0) {
-                throw new RuntimeException("Shard failures encountered while running indexer for job ["
-                    + getJobId() + "]: " + Arrays.toString(searchResponse.getShardFailures()));
-            }
             IterationResult<JobPosition> iterationResult = doProcess(searchResponse);
@@ -362,18 +359,23 @@ public abstract class AsyncTwoPhaseIndexer<JobPosition, JobStats extends Indexe
             if (stats.getNumPages() > 0 && stats.getNumPages() % 50 == 0) {
                 doSaveState(IndexerState.INDEXING, position, () -> {
-                    stats.markStartSearch();
-                    doNextSearch(buildSearchRequest(), listener);
+                    nextSearch(listener);
                 });
             } else {
-                stats.markStartSearch();
-                doNextSearch(buildSearchRequest(), listener);
+                nextSearch(listener);
             }
         } catch (Exception e) {
             finishWithIndexingFailure(e);
         }
     }

+    private void nextSearch(ActionListener<SearchResponse> listener) {
+        stats.markStartSearch();
+        // ensure that partial results are not accepted and cause a search failure
+        SearchRequest searchRequest = buildSearchRequest().allowPartialSearchResults(false);
+        doNextSearch(searchRequest, listener);
+    }
+
     /**
      * Checks the {@link IndexerState} and returns false if the execution should be
      * stopped.
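Before the test diffs below, a note on how implementations plug in: concrete
indexers receive the request built by nextSearch() through doNextSearch(). A
minimal sketch of such an override, assuming a hypothetical client field
(illustrative only, not part of this patch):

    // Hypothetical subclass hook. After this change the base class always
    // passes a request with allowPartialSearchResults == Boolean.FALSE, so
    // shard failures reach nextPhase.onFailure(...) instead of producing a
    // partial SearchResponse.
    @Override
    protected void doNextSearch(SearchRequest request, ActionListener<SearchResponse> nextPhase) {
        assert request.allowPartialSearchResults() == Boolean.FALSE;
        client.search(request, nextPhase); // "client" is an assumed field
    }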
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
index f7f97288b23..b39c4f1a25a 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
@@ -72,7 +72,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase {
         protected SearchRequest buildSearchRequest() {
             assertThat(step, equalTo(1));
             ++step;
-            return null;
+            return new SearchRequest();
         }

         @Override
@@ -151,7 +151,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase {
         protected SearchRequest buildSearchRequest() {
             assertThat(step, equalTo(1));
             ++step;
-            return null;
+            return new SearchRequest();
         }

         @Override
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
index 49baeb4fade..e6264b02bca 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
@@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchResponseSections;
@@ -197,7 +198,13 @@ public class RollupIndexerStateTests extends ESTestCase {
             } catch (InterruptedException e) {
                 throw new IllegalStateException(e);
             }
-            nextPhase.onResponse(searchFunction.apply(request));
+
+            try {
+                SearchResponse response = searchFunction.apply(request);
+                nextPhase.onResponse(response);
+            } catch (Exception e) {
+                nextPhase.onFailure(e);
+            }
         }

         @Override
@@ -800,15 +807,14 @@ public class RollupIndexerStateTests extends ESTestCase {
         RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
         AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED);
         Function<SearchRequest, SearchResponse> searchFunction = searchRequest -> {
-            ShardSearchFailure[] failures = new ShardSearchFailure[]{new ShardSearchFailure(new RuntimeException("failed"))};
-            return new SearchResponse(null, null, 1, 1, 0, 0,
-                failures, null);
+            throw new SearchPhaseExecutionException("query", "Partial shards failure",
+                new ShardSearchFailure[] { new ShardSearchFailure(new RuntimeException("failed")) });
         };

         Function<BulkRequest, BulkResponse> bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100);

         Consumer<Exception> failureConsumer = e -> {
-            assertThat(e.getMessage(), startsWith("Shard failures encountered while running indexer for job"));
+            assertThat(e.getMessage(), startsWith("Partial shards failure"));
             isFinished.set(true);
         };