From 669d72e47a84deecc9b547e9ad753b5c269ab8c8 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Fri, 5 Apr 2019 09:35:10 +0200 Subject: [PATCH 01/45] Fix dense/sparse vector limit documentation (#40852) The documentation stated a wrong limit of dense/sparse vector sizes. This was changed in #40597 but the documentation was not fixed. --- docs/reference/mapping/types/dense-vector.asciidoc | 2 +- docs/reference/mapping/types/sparse-vector.asciidoc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index f656092e472..335c8f16ba9 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -5,7 +5,7 @@ experimental[] A `dense_vector` field stores dense vectors of float values. The maximum number of dimensions that can be in a vector should -not exceed 500. The number of dimensions can be +not exceed 1024. The number of dimensions can be different across documents. A `dense_vector` field is a single-valued field. diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc index 8ed4920c4e6..70b2ce4ed31 100644 --- a/docs/reference/mapping/types/sparse-vector.asciidoc +++ b/docs/reference/mapping/types/sparse-vector.asciidoc @@ -5,7 +5,7 @@ experimental[] A `sparse_vector` field stores sparse vectors of float values. The maximum number of dimensions that can be in a vector should -not exceed 500. The number of dimensions can be +not exceed 1024. The number of dimensions can be different across documents. A `sparse_vector` field is a single-valued field. From d8956d260149708b81e81c2446f348090451d3ff Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 5 Apr 2019 08:50:44 +0100 Subject: [PATCH 02/45] Remove test-only customisation from TransReplAct (#40863) The `getIndexShard()` and `sendReplicaRequest()` methods in TransportReplicationAction are effectively only used to customise some behaviour in tests. However there are other ways to do this that do not cause such an obstacle to separating the TransportReplicationAction into its two halves (see #40706). This commit removes these customisation points and injects the test-only behaviour using other techniques. 
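A minimal sketch of the replacement wiring, abbreviated from the test changes in this
patch (`shard` here stands for the test's mocked IndexShard): rather than overriding
getIndexShard(), the test builds a mocked IndicesService that resolves the shard and
hands it straight to the action's constructor.

    // mock the index/indices services so they hand back the test's shard
    final IndexService indexService = mock(IndexService.class);
    when(indexService.getShard(shard.shardId().id())).thenReturn(shard);

    final IndicesService indicesService = mock(IndicesService.class);
    when(indicesService.indexServiceSafe(shard.shardId().getIndex())).thenReturn(indexService);

    // the action under test now receives the mocked IndicesService directly
    TestAction action = new TestAction(Settings.EMPTY, "internal:testSeqNoIsSetOnPrimary",
        transportService, clusterService, shardStateAction, threadPool, indicesService);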
--- .../TransportResyncReplicationAction.java | 14 -- .../TransportReplicationAction.java | 28 +--- .../seqno/GlobalCheckpointSyncAction.java | 16 --- .../TransportReplicationActionTests.java | 31 ++-- ...ReplicationAllPermitsAcquisitionTests.java | 133 +++++++++++------- 5 files changed, 108 insertions(+), 114 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index eb2f18e2e40..de1bf0e517b 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -74,19 +73,6 @@ public class TransportResyncReplicationAction extends TransportWriteAction replicaRequest, - final DiscoveryNode node, - final ActionListener listener) { - if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - super.sendReplicaRequest(replicaRequest, node, listener); - } else { - final long pre60NodeCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT; - listener.onResponse(new ReplicaResponse(pre60NodeCheckpoint, pre60NodeCheckpoint)); - } - } - @Override protected ClusterBlockLevel globalBlockLevel() { // resync should never be blocked because it's an internal action diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index d27b6d4f31b..ac6298c2c86 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -619,7 +619,7 @@ public abstract class TransportReplicationAction< } } - protected IndexShard getIndexShard(final ShardId shardId) { + private IndexShard getIndexShard(final ShardId shardId) { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); return indexService.getShard(shardId.id()); } @@ -1058,7 +1058,12 @@ public abstract class TransportReplicationAction< } final ConcreteReplicaRequest replicaRequest = new ConcreteReplicaRequest<>( request, replica.allocationId().getId(), primaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes); - sendReplicaRequest(replicaRequest, node, listener); + final ActionListenerResponseHandler handler = new ActionListenerResponseHandler<>(listener, in -> { + ReplicaResponse replicaResponse = new ReplicaResponse(); + replicaResponse.readFrom(in); + return replicaResponse; + }); + transportService.sendRequest(node, transportReplicaAction, replicaRequest, transportOptions, handler); } @Override @@ -1080,25 +1085,6 @@ public abstract class TransportReplicationAction< } } - /** - * Sends the specified replica request to the specified node. 
- * - * @param replicaRequest the replica request - * @param node the node to send the request to - * @param listener callback for handling the response or failure - */ - protected void sendReplicaRequest( - final ConcreteReplicaRequest replicaRequest, - final DiscoveryNode node, - final ActionListener listener) { - final ActionListenerResponseHandler handler = new ActionListenerResponseHandler<>(listener, in -> { - ReplicaResponse replicaResponse = new ReplicaResponse(); - replicaResponse.readFrom(in); - return replicaResponse; - }); - transportService.sendRequest(node, transportReplicaAction, replicaRequest, transportOptions, handler); - } - /** a wrapper class to encapsulate a request when being sent to a specific allocation id **/ public static class ConcreteShardRequest extends TransportRequest { diff --git a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index 9b55cff8cff..4d3d0123fe6 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -22,16 +22,13 @@ package org.elasticsearch.index.seqno; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -103,19 +100,6 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction< return new ReplicationResponse(); } - @Override - protected void sendReplicaRequest( - final ConcreteReplicaRequest replicaRequest, - final DiscoveryNode node, - final ActionListener listener) { - if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - super.sendReplicaRequest(replicaRequest, node, listener); - } else { - final long pre60NodeCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT; - listener.onResponse(new ReplicaResponse(pre60NodeCheckpoint, pre60NodeCheckpoint)); - } - } - @Override protected PrimaryResult shardOperationOnPrimary( final Request request, final IndexShard indexShard) throws Exception { diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 9164d9e4184..02e9ff3146c 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -757,7 +757,7 @@ public class TransportReplicationActionTests extends ESTestCase { assertEquals(0, shardFailedRequests.length); } - public 
void testSeqNoIsSetOnPrimary() throws Exception { + public void testSeqNoIsSetOnPrimary() { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); // we use one replica to check the primary term was set on the operation and sent to the replica @@ -788,14 +788,14 @@ public class TransportReplicationActionTests extends ESTestCase { return null; }).when(shard).acquirePrimaryOperationPermit(any(), anyString(), anyObject()); - TestAction action = - new TestAction(Settings.EMPTY, "internal:testSeqNoIsSetOnPrimary", transportService, clusterService, shardStateAction, - threadPool) { - @Override - protected IndexShard getIndexShard(ShardId shardId) { - return shard; - } - }; + final IndexService indexService = mock(IndexService.class); + when(indexService.getShard(shard.shardId().id())).thenReturn(shard); + + final IndicesService indicesService = mock(IndicesService.class); + when(indicesService.indexServiceSafe(shard.shardId().getIndex())).thenReturn(indexService); + + TestAction action = new TestAction(Settings.EMPTY, "internal:testSeqNoIsSetOnPrimary", transportService, clusterService, + shardStateAction, threadPool, indicesService); action.handlePrimaryRequest(concreteShardRequest, createTransportChannel(listener), null); CapturingTransport.CapturedRequest[] requestsToReplicas = transport.capturedRequests(); @@ -1207,11 +1207,16 @@ public class TransportReplicationActionTests extends ESTestCase { private class TestAction extends TransportReplicationAction { - TestAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ShardStateAction shardStateAction, ThreadPool threadPool) { - super(settings, actionName, transportService, clusterService, mockIndicesService(clusterService), threadPool, + this(settings, actionName, transportService, clusterService, shardStateAction, threadPool, mockIndicesService(clusterService)); + } + + TestAction(Settings settings, String actionName, TransportService transportService, + ClusterService clusterService, ShardStateAction shardStateAction, + ThreadPool threadPool, IndicesService indicesService) { + super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), Request::new, Request::new, ThreadPool.Names.SAME); @@ -1241,7 +1246,7 @@ public class TransportReplicationActionTests extends ESTestCase { } } - final IndicesService mockIndicesService(ClusterService clusterService) { + private IndicesService mockIndicesService(ClusterService clusterService) { final IndicesService indicesService = mock(IndicesService.class); when(indicesService.indexServiceSafe(any(Index.class))).then(invocation -> { Index index = (Index)invocation.getArguments()[0]; @@ -1261,7 +1266,7 @@ public class TransportReplicationActionTests extends ESTestCase { return indicesService; } - final IndexService mockIndexService(final IndexMetaData indexMetaData, ClusterService clusterService) { + private IndexService mockIndexService(final IndexMetaData indexMetaData, ClusterService clusterService) { final IndexService indexService = mock(IndexService.class); when(indexService.getShard(anyInt())).then(invocation -> { int shard = (Integer) invocation.getArguments()[0]; diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java 
b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java index 8fe204cee2c..b8c87acb56d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java @@ -41,18 +41,23 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.MockTransport; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportMessageListener; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import org.junit.After; import org.junit.Before; @@ -70,6 +75,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_INDEX_UUID; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -78,6 +84,8 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_C import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasItem; @@ -85,6 +93,9 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** @@ -163,7 +174,49 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe setState(clusterService, state.build()); final Settings transportSettings = Settings.builder().put("node.name", node1.getId()).build(); - transportService = MockTransportService.createNewService(transportSettings, Version.CURRENT, threadPool, null); + + MockTransport 
transport = new MockTransport() { + @Override + protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) { + assertThat(action, allOf(startsWith("cluster:admin/test/"), endsWith("[r]"))); + assertThat(node, equalTo(node2)); + // node2 doesn't really exist, but we are performing some trickery in mockIndicesService() to pretend that node1 holds both + // the primary and the replica, so redirect the request back to node1. + transportService.sendRequest(transportService.getLocalNode(), action, request, + new TransportResponseHandler() { + @Override + public TransportReplicationAction.ReplicaResponse read(StreamInput in) throws IOException { + final TransportReplicationAction.ReplicaResponse replicaResponse + = new TransportReplicationAction.ReplicaResponse(); + replicaResponse.readFrom(in); + return replicaResponse; + } + + @SuppressWarnings("unchecked") + private TransportResponseHandler getResponseHandler() { + return (TransportResponseHandler) + getResponseHandlers().onResponseReceived(requestId, TransportMessageListener.NOOP_LISTENER); + } + + @Override + public void handleResponse(TransportReplicationAction.ReplicaResponse response) { + getResponseHandler().handleResponse(response); + } + + @Override + public void handleException(TransportException exp) { + getResponseHandler().handleException(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }); + } + }; + transportService = transport.createTransportService(transportSettings, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + bta -> node1, null, emptySet()); transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool); @@ -198,7 +251,8 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe final PlainActionFuture listener = new PlainActionFuture<>(); futures[threadId] = listener; - final TestAction singlePermitAction = new SinglePermitWithBlocksAction(Settings.EMPTY, "internalSinglePermit[" + threadId + "]", + final TestAction singlePermitAction = new SinglePermitWithBlocksAction(Settings.EMPTY, + "cluster:admin/test/single_permit[" + threadId + "]", transportService, clusterService, shardStateAction, threadPool, shardId, primary, replica, globalBlock); actions[threadId] = singlePermitAction; @@ -251,8 +305,8 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe logger.trace("now starting the operation that acquires all permits and sets the block in the cluster state"); // An action which acquires all operation permits during execution and set a block - final TestAction allPermitsAction = new AllPermitsThenBlockAction(Settings.EMPTY, "internalAllPermits", transportService, - clusterService, shardStateAction, threadPool, shardId, primary, replica); + final TestAction allPermitsAction = new AllPermitsThenBlockAction(Settings.EMPTY, "cluster:admin/test/all_permits", + transportService, clusterService, shardStateAction, threadPool, shardId, primary, replica); final PlainActionFuture allPermitFuture = new PlainActionFuture<>(); Thread thread = new Thread(() -> { @@ -299,6 +353,7 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe } final Response allPermitsResponse = allPermitFuture.get(); + assertSuccessfulOperation(allPermitsAction, allPermitsResponse); for (int i = 0; i < numOperations; i++) { @@ -357,18 +412,21 @@ public class 
TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe protected final ShardId shardId; protected final IndexShard primary; protected final IndexShard replica; - protected final SetOnce executedOnPrimary = new SetOnce<>(); - protected final SetOnce executedOnReplica = new SetOnce<>(); + final SetOnce executedOnPrimary; + final SetOnce executedOnReplica = new SetOnce<>(); TestAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, - ShardStateAction shardStateAction, ThreadPool threadPool, ShardId shardId, IndexShard primary, IndexShard replica) { - super(settings, actionName, transportService, clusterService, null, threadPool, shardStateAction, + ShardStateAction shardStateAction, ThreadPool threadPool, ShardId shardId, IndexShard primary, IndexShard replica, + SetOnce executedOnPrimary) { + super(settings, actionName, transportService, clusterService, mockIndicesService(shardId, executedOnPrimary, primary, replica), + threadPool, shardStateAction, new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), Request::new, Request::new, ThreadPool.Names.SAME); this.shardId = Objects.requireNonNull(shardId); this.primary = Objects.requireNonNull(primary); assertEquals(shardId, primary.shardId()); this.replica = Objects.requireNonNull(replica); assertEquals(shardId, replica.shardId()); + this.executedOnPrimary = executedOnPrimary; } @Override @@ -391,52 +449,25 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe @Override protected ReplicaResult shardOperationOnReplica(Request shardRequest, IndexShard shard) throws Exception { + assertEquals("Replica is always assigned to node 2 in this test", clusterService.state().nodes().get("_node2").getId(), + shard.routingEntry().currentNodeId()); executedOnReplica.set(true); // The TransportReplicationAction.getIndexShard() method is overridden for testing purpose but we double check here // that the permit has been acquired on the replica shard assertSame(replica, shard); return new ReplicaResult(); } + } - @Override - protected IndexShard getIndexShard(final ShardId shardId) { - if (this.shardId.equals(shardId) == false) { - throw new AssertionError("shard id differs from " + shardId); - } - return (executedOnPrimary.get() == null) ? primary : replica; - } + private static IndicesService mockIndicesService(ShardId shardId, SetOnce executedOnPrimary, IndexShard primary, + IndexShard replica) { + final IndexService indexService = mock(IndexService.class); + when(indexService.getShard(shardId.id())).then(invocation -> (executedOnPrimary.get() == null) ? 
primary : replica); - @Override - protected void sendReplicaRequest(final ConcreteReplicaRequest replicaRequest, - final DiscoveryNode node, - final ActionListener listener) { - assertEquals("Replica is always assigned to node 2 in this test", clusterService.state().nodes().get("_node2"), node); - try { - handleReplicaRequest(replicaRequest, new TransportChannel() { - @Override - public String getProfileName() { - return null; - } + final IndicesService indicesService = mock(IndicesService.class); + when(indicesService.indexServiceSafe(shardId.getIndex())).then(invocation -> indexService); - @Override - public String getChannelType() { - return null; - } - - @Override - public void sendResponse(TransportResponse response) throws IOException { - listener.onResponse((ReplicationOperation.ReplicaResponse) response); - } - - @Override - public void sendResponse(Exception exception) throws IOException { - listener.onFailure(exception); - } - }, null); - } catch (Exception e) { - listener.onFailure(e); - } - } + return indicesService; } /** @@ -452,7 +483,8 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe SinglePermitWithBlocksAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ShardStateAction shardStateAction, ThreadPool threadPool, ShardId shardId, IndexShard primary, IndexShard replica, boolean globalBlock) { - super(settings, actionName, transportService, clusterService, shardStateAction, threadPool, shardId, primary, replica); + super(settings, actionName, transportService, clusterService, shardStateAction, threadPool, shardId, primary, replica, + new SetOnce<>()); this.globalBlock = globalBlock; } @@ -497,7 +529,8 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe AllPermitsThenBlockAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ShardStateAction shardStateAction, ThreadPool threadPool, ShardId shardId, IndexShard primary, IndexShard replica) { - super(settings, actionName, transportService, clusterService, shardStateAction, threadPool, shardId, primary, replica); + super(settings, actionName, transportService, clusterService, shardStateAction, threadPool, shardId, primary, replica, + new SetOnce<>()); } @Override From 922a70ce326c8d6b98aedc366559e7e746f46f6f Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 5 Apr 2019 09:21:23 +0100 Subject: [PATCH 03/45] Remove unused import Relates #40863 --- .../action/resync/TransportResyncReplicationAction.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index de1bf0e517b..89e20953493 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.resync; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationOperation; From 665f0d81aa5090374a7f51d557885d4f792ccbbf Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 5 Apr 2019 09:01:01 -0500 Subject: [PATCH 04/45] [ML] 
refactoring start task a bit, removing unused code (#40798) (#40845) --- .../StartDataFrameTransformTaskAction.java | 6 + ...portStartDataFrameTransformTaskAction.java | 22 -- .../DataFramePersistentTaskUtils.java | 41 --- .../dataframe/util/BatchedDataIterator.java | 186 ---------- .../util/TypedChainTaskExecutor.java | 125 ------- .../util/BatchedDataIteratorTests.java | 329 ------------------ .../test/data_frame/transforms_start_stop.yml | 61 ++++ 7 files changed, 67 insertions(+), 703 deletions(-) delete mode 100644 x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFramePersistentTaskUtils.java delete mode 100644 x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIterator.java delete mode 100644 x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/util/TypedChainTaskExecutor.java delete mode 100644 x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java index d3c96fb9cf1..044d9d58aed 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -62,6 +63,11 @@ public class StartDataFrameTransformTaskAction extends Action operation) { - DataFrameTransformTask matchingTask = null; - - // todo: re-factor, see rollup TransportTaskHelper - for (Task task : taskManager.getTasks().values()) { - if (task instanceof DataFrameTransformTask - && ((DataFrameTransformTask) task).getTransformId().equals(request.getId())) { - if (matchingTask != null) { - throw new IllegalArgumentException("Found more than one matching task for data frame transform [" + request.getId() - + "] when " + "there should only be one."); - } - matchingTask = (DataFrameTransformTask) task; - } - } - - if (matchingTask != null) { - operation.accept(matchingTask); - } - } - @Override protected void doExecute(Task task, StartDataFrameTransformTaskAction.Request request, ActionListener listener) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFramePersistentTaskUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFramePersistentTaskUtils.java deleted file mode 100644 index 76e635df0d8..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFramePersistentTaskUtils.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.dataframe.persistence; - -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; - -public final class DataFramePersistentTaskUtils { - - private DataFramePersistentTaskUtils() { - } - - /** - * Check to see if the PersistentTask's cluster state contains the data frame transform(s) we - * are interested in - */ - public static boolean stateHasDataFrameTransforms(String id, ClusterState state) { - boolean hasTransforms = false; - PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - - if (pTasksMeta != null) { - // If the request was for _all transforms, we need to look through the list of - // persistent tasks and see if at least one is a data frame task - if (id.equals(MetaData.ALL)) { - hasTransforms = pTasksMeta.tasks().stream() - .anyMatch(persistentTask -> persistentTask.getTaskName().equals(DataFrameField.TASK_NAME)); - - } else if (pTasksMeta.getTask(id) != null) { - // If we're looking for a single transform, we can just check directly - hasTransforms = true; - } - } - return hasTransforms; - } -} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIterator.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIterator.java deleted file mode 100644 index 56c58252454..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIterator.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.dataframe.util; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.ClearScrollRequest; -import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.subphase.FetchSourceContext; -import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.xpack.core.ClientHelper; - -import java.util.Collection; -import java.util.Collections; -import java.util.NoSuchElementException; -import java.util.Objects; - -/** - * Provides basic tools around scrolling over documents and gathering the data in some Collection - * @param The object type that is being collected - * @param The collection that should be used (i.e. Set, Deque, etc.) 
- */ -public abstract class BatchedDataIterator> { - private static final Logger LOGGER = LogManager.getLogger(BatchedDataIterator.class); - - private static final String CONTEXT_ALIVE_DURATION = "5m"; - private static final int BATCH_SIZE = 10_000; - - private final Client client; - private final String index; - private volatile long count; - private volatile long totalHits; - private volatile String scrollId; - private volatile boolean isScrollInitialised; - - protected BatchedDataIterator(Client client, String index) { - this.client = Objects.requireNonNull(client); - this.index = Objects.requireNonNull(index); - this.totalHits = 0; - this.count = 0; - } - - /** - * Returns {@code true} if the iteration has more elements. - * (In other words, returns {@code true} if {@link #next} would - * return an element rather than throwing an exception.) - * - * @return {@code true} if the iteration has more elements - */ - public boolean hasNext() { - return !isScrollInitialised || count != totalHits; - } - - /** - * The first time next() is called, the search will be performed and the first - * batch will be given to the listener. Any subsequent call will return the following batches. - *

- * Note that in some implementations it is possible that when there are no - * results at all. {@link BatchedDataIterator#hasNext()} will return {@code true} the first time it is called but then a call - * to this function returns an empty Collection to the listener. - */ - public void next(ActionListener listener) { - if (!hasNext()) { - listener.onFailure(new NoSuchElementException()); - } - - if (!isScrollInitialised) { - ActionListener wrappedListener = ActionListener.wrap( - searchResponse -> { - isScrollInitialised = true; - totalHits = searchResponse.getHits().getTotalHits().value; - scrollId = searchResponse.getScrollId(); - mapHits(searchResponse, listener); - }, - listener::onFailure - ); - initScroll(wrappedListener); - } else { - ActionListener wrappedListener = ActionListener.wrap( - searchResponse -> { - scrollId = searchResponse.getScrollId(); - mapHits(searchResponse, listener); - }, - listener::onFailure - ); - SearchScrollRequest searchScrollRequest = new SearchScrollRequest(scrollId).scroll(CONTEXT_ALIVE_DURATION); - ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ClientHelper.DATA_FRAME_ORIGIN, - searchScrollRequest, - wrappedListener, - client::searchScroll); - } - } - - private void initScroll(ActionListener listener) { - LOGGER.trace("ES API CALL: search index {}", index); - - SearchRequest searchRequest = new SearchRequest(index); - searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); - searchRequest.scroll(CONTEXT_ALIVE_DURATION); - searchRequest.source(new SearchSourceBuilder() - .fetchSource(getFetchSourceContext()) - .size(getBatchSize()) - .query(getQuery()) - .trackTotalHits(true) - .sort(sortField(), sortOrder())); - - ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ClientHelper.DATA_FRAME_ORIGIN, - searchRequest, - listener, - client::search); - } - - private void mapHits(SearchResponse searchResponse, ActionListener mappingListener) { - E results = getCollection(); - - SearchHit[] hits = searchResponse.getHits().getHits(); - for (SearchHit hit : hits) { - T mapped = map(hit); - if (mapped != null) { - results.add(mapped); - } - } - count += hits.length; - - if (!hasNext() && scrollId != null) { - ClearScrollRequest request = client.prepareClearScroll().setScrollIds(Collections.singletonList(scrollId)).request(); - ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ClientHelper.DATA_FRAME_ORIGIN, - request, - ActionListener.wrap( - r -> mappingListener.onResponse(results), - mappingListener::onFailure - ), - client::clearScroll); - } else { - mappingListener.onResponse(results); - } - } - - /** - * Get the query to use for the search - * @return the search query - */ - protected abstract QueryBuilder getQuery(); - - /** - * Maps the search hit to the document type - * @param hit the search hit - * @return The mapped document or {@code null} if the mapping failed - */ - protected abstract T map(SearchHit hit); - - protected abstract E getCollection(); - - protected abstract SortOrder sortOrder(); - - protected abstract String sortField(); - - /** - * Should we fetch the source and what fields specifically. - * - * Defaults to all fields and true. 
- */ - protected FetchSourceContext getFetchSourceContext() { - return FetchSourceContext.FETCH_SOURCE; - } - - protected int getBatchSize() { - return BATCH_SIZE; - } -} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/util/TypedChainTaskExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/util/TypedChainTaskExecutor.java deleted file mode 100644 index 6657a1a81c7..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/util/TypedChainTaskExecutor.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.dataframe.util; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.ExecutorService; -import java.util.function.Predicate; - -/** - * A utility that allows chained (serial) execution of a number of tasks - * in async manner. - */ -public class TypedChainTaskExecutor { - - public interface ChainTask { - void run(ActionListener listener); - } - - private final ExecutorService executorService; - private final LinkedList> tasks = new LinkedList<>(); - private final Predicate failureShortCircuitPredicate; - private final Predicate continuationPredicate; - private final List collectedResponses; - - /** - * Creates a new TypedChainTaskExecutor. - * Each chainedTask is executed in order serially and after each execution the continuationPredicate is tested. - * - * On failures the failureShortCircuitPredicate is tested. - * - * @param executorService The service where to execute the tasks - * @param continuationPredicate The predicate to test on whether to execute the next task or not. - * {@code true} means continue on to the next task. - * Must be able to handle null values. - * @param failureShortCircuitPredicate The predicate on whether to short circuit execution on a give exception. - * {@code true} means that no more tasks should execute and the the listener::onFailure should be - * called. 
- */ - public TypedChainTaskExecutor(ExecutorService executorService, - Predicate continuationPredicate, - Predicate failureShortCircuitPredicate) { - this.executorService = Objects.requireNonNull(executorService); - this.continuationPredicate = continuationPredicate; - this.failureShortCircuitPredicate = failureShortCircuitPredicate; - this.collectedResponses = new ArrayList<>(); - } - - public synchronized void add(ChainTask task) { - tasks.add(task); - } - - private synchronized void execute(T previousValue, ActionListener> listener) { - collectedResponses.add(previousValue); - if (continuationPredicate.test(previousValue)) { - if (tasks.isEmpty()) { - listener.onResponse(Collections.unmodifiableList(new ArrayList<>(collectedResponses))); - return; - } - ChainTask task = tasks.pop(); - executorService.execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - if (failureShortCircuitPredicate.test(e)) { - listener.onFailure(e); - } else { - execute(null, listener); - } - } - - @Override - protected void doRun() { - task.run(ActionListener.wrap(value -> execute(value, listener), this::onFailure)); - } - }); - } else { - listener.onResponse(Collections.unmodifiableList(new ArrayList<>(collectedResponses))); - } - } - - /** - * Execute all the chained tasks serially, notify listener when completed - * - * @param listener The ActionListener to notify when all executions have been completed, - * or when no further tasks should be executed. - * The resulting list COULD contain null values depending on if execution is continued - * on exceptions or not. - */ - public synchronized void execute(ActionListener> listener) { - if (tasks.isEmpty()) { - listener.onResponse(Collections.emptyList()); - return; - } - collectedResponses.clear(); - ChainTask task = tasks.pop(); - executorService.execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - if (failureShortCircuitPredicate.test(e)) { - listener.onFailure(e); - } else { - execute(null, listener); - } - } - - @Override - protected void doRun() { - task.run(ActionListener.wrap(value -> execute(value, listener), this::onFailure)); - } - }); - } - - public synchronized List getCollectedResponses() { - return Collections.unmodifiableList(new ArrayList<>(collectedResponses)); - } -} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java deleted file mode 100644 index 4ca60acac37..00000000000 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.dataframe.util; - -import org.apache.lucene.search.TotalHits; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.ClearScrollRequest; -import org.elasticsearch.action.search.ClearScrollRequestBuilder; -import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.document.DocumentField; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.junit.Before; -import org.mockito.Mockito; - -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Deque; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.concurrent.ExecutionException; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class BatchedDataIteratorTests extends ESTestCase { - - private static final String INDEX_NAME = "some_index_name"; - private static final String SCROLL_ID = "someScrollId"; - - private Client client; - private boolean wasScrollCleared; - - private TestIterator testIterator; - - private List searchRequestCaptor = new ArrayList<>(); - private List searchScrollRequestCaptor = new ArrayList<>(); - - @Before - public void setUpMocks() { - ThreadPool pool = mock(ThreadPool.class); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - when(pool.getThreadContext()).thenReturn(threadContext); - client = Mockito.mock(Client.class); - when(client.threadPool()).thenReturn(pool); - wasScrollCleared = false; - testIterator = new TestIterator(client, INDEX_NAME); - givenClearScrollRequest(); - searchRequestCaptor.clear(); - searchScrollRequestCaptor.clear(); - } - - public void testQueryReturnsNoResults() throws Exception { - new ScrollResponsesMocker().finishMock(); - - assertTrue(testIterator.hasNext()); - PlainActionFuture> future = new PlainActionFuture<>(); - testIterator.next(future); - assertTrue(future.get().isEmpty()); - assertFalse(testIterator.hasNext()); - assertTrue(wasScrollCleared); - assertSearchRequest(); - assertSearchScrollRequests(0); - } - - public void testCallingNextWhenHasNextIsFalseThrows() throws Exception { - PlainActionFuture> firstFuture = new PlainActionFuture<>(); - new ScrollResponsesMocker().addBatch(createJsonDoc("a"), createJsonDoc("b"), createJsonDoc("c")).finishMock(); - testIterator.next(firstFuture); - firstFuture.get(); - 
assertFalse(testIterator.hasNext()); - PlainActionFuture> future = new PlainActionFuture<>(); - ExecutionException executionException = ESTestCase.expectThrows(ExecutionException.class, () -> { - testIterator.next(future); - future.get(); - }); - assertNotNull(executionException.getCause()); - assertTrue(executionException.getCause() instanceof NoSuchElementException); - } - - public void testQueryReturnsSingleBatch() throws Exception { - PlainActionFuture> future = new PlainActionFuture<>(); - new ScrollResponsesMocker().addBatch(createJsonDoc("a"), createJsonDoc("b"), createJsonDoc("c")).finishMock(); - - assertTrue(testIterator.hasNext()); - testIterator.next(future); - Deque batch = future.get(); - assertEquals(3, batch.size()); - assertTrue(batch.containsAll(Arrays.asList(createJsonDoc("a"), createJsonDoc("b"), createJsonDoc("c")))); - assertFalse(testIterator.hasNext()); - assertTrue(wasScrollCleared); - - assertSearchRequest(); - assertSearchScrollRequests(0); - } - - public void testQueryReturnsThreeBatches() throws Exception { - PlainActionFuture> future = new PlainActionFuture<>(); - new ScrollResponsesMocker() - .addBatch(createJsonDoc("a"), createJsonDoc("b"), createJsonDoc("c")) - .addBatch(createJsonDoc("d"), createJsonDoc("e")) - .addBatch(createJsonDoc("f")) - .finishMock(); - - assertTrue(testIterator.hasNext()); - - testIterator.next(future); - Deque batch = future.get(); - assertEquals(3, batch.size()); - assertTrue(batch.containsAll(Arrays.asList(createJsonDoc("a"), createJsonDoc("b"), createJsonDoc("c")))); - - future = new PlainActionFuture<>(); - testIterator.next(future); - batch = future.get(); - assertEquals(2, batch.size()); - assertTrue(batch.containsAll(Arrays.asList(createJsonDoc("d"), createJsonDoc("e")))); - - future = new PlainActionFuture<>(); - testIterator.next(future); - batch = future.get(); - assertEquals(1, batch.size()); - assertTrue(batch.contains(createJsonDoc("f"))); - - assertFalse(testIterator.hasNext()); - assertTrue(wasScrollCleared); - - assertSearchRequest(); - assertSearchScrollRequests(2); - } - - private String createJsonDoc(String value) { - return "{\"foo\":\"" + value + "\"}"; - } - - @SuppressWarnings("unchecked") - private void givenClearScrollRequest() { - ClearScrollRequestBuilder requestBuilder = mock(ClearScrollRequestBuilder.class); - - when(client.prepareClearScroll()).thenReturn(requestBuilder); - when(requestBuilder.setScrollIds(Collections.singletonList(SCROLL_ID))).thenReturn(requestBuilder); - ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); - clearScrollRequest.addScrollId(SCROLL_ID); - when(requestBuilder.request()).thenReturn(clearScrollRequest); - doAnswer((answer) -> { - wasScrollCleared = true; - ActionListener scrollListener = - (ActionListener) answer.getArguments()[1]; - scrollListener.onResponse(new ClearScrollResponse(true,0)); - return null; - }).when(client).clearScroll(any(ClearScrollRequest.class), any(ActionListener.class)); - } - - private void assertSearchRequest() { - List searchRequests = searchRequestCaptor; - assertThat(searchRequests.size(), equalTo(1)); - SearchRequest searchRequest = searchRequests.get(0); - assertThat(searchRequest.indices(), equalTo(new String[] {INDEX_NAME})); - assertThat(searchRequest.scroll().keepAlive(), equalTo(TimeValue.timeValueMinutes(5))); - assertThat(searchRequest.source().query(), equalTo(QueryBuilders.matchAllQuery())); - assertThat(searchRequest.source().trackTotalHitsUpTo(), is(SearchContext.TRACK_TOTAL_HITS_ACCURATE)); - } - - private void 
assertSearchScrollRequests(int expectedCount) { - List searchScrollRequests = searchScrollRequestCaptor; - assertThat(searchScrollRequests.size(), equalTo(expectedCount)); - for (SearchScrollRequest request : searchScrollRequests) { - assertThat(request.scrollId(), equalTo(SCROLL_ID)); - assertThat(request.scroll().keepAlive(), equalTo(TimeValue.timeValueMinutes(5))); - } - } - - private class ScrollResponsesMocker { - private List batches = new ArrayList<>(); - private long totalHits = 0; - private List responses = new ArrayList<>(); - - ScrollResponsesMocker addBatch(String... hits) { - totalHits += hits.length; - batches.add(hits); - return this; - } - - @SuppressWarnings("unchecked") - void finishMock() { - if (batches.isEmpty()) { - givenInitialResponse(); - return; - } - givenInitialResponse(batches.get(0)); - for (int i = 1; i < batches.size(); ++i) { - givenNextResponse(batches.get(i)); - } - if (responses.size() > 0) { - SearchResponse first = responses.get(0); - if (responses.size() > 1) { - List rest = new ArrayList<>(responses); - Iterator responseIterator = rest.iterator(); - doAnswer((answer) -> { - SearchScrollRequest request = (SearchScrollRequest)answer.getArguments()[0]; - ActionListener rsp = (ActionListener)answer.getArguments()[1]; - searchScrollRequestCaptor.add(request); - rsp.onResponse(responseIterator.next()); - return null; - }).when(client).searchScroll(any(SearchScrollRequest.class), any(ActionListener.class)); - } else { - doAnswer((answer) -> { - SearchScrollRequest request = (SearchScrollRequest)answer.getArguments()[0]; - ActionListener rsp = (ActionListener)answer.getArguments()[1]; - searchScrollRequestCaptor.add(request); - rsp.onResponse(first); - return null; - }).when(client).searchScroll(any(SearchScrollRequest.class), any(ActionListener.class)); - } - } - } - - @SuppressWarnings("unchecked") - private void givenInitialResponse(String... hits) { - SearchResponse searchResponse = createSearchResponseWithHits(hits); - doAnswer((answer) -> { - SearchRequest request = (SearchRequest)answer.getArguments()[0]; - searchRequestCaptor.add(request); - ActionListener rsp = (ActionListener)answer.getArguments()[1]; - rsp.onResponse(searchResponse); - return null; - }).when(client).search(any(SearchRequest.class), any(ActionListener.class)); - } - - private void givenNextResponse(String... hits) { - responses.add(createSearchResponseWithHits(hits)); - } - - private SearchResponse createSearchResponseWithHits(String... hits) { - SearchHits searchHits = createHits(hits); - SearchResponse searchResponse = mock(SearchResponse.class); - when(searchResponse.getScrollId()).thenReturn(SCROLL_ID); - when(searchResponse.getHits()).thenReturn(searchHits); - return searchResponse; - } - - private SearchHits createHits(String... 
values) { - List hits = new ArrayList<>(); - for (String value : values) { - hits.add(new SearchHitBuilder(randomInt()).setSource(value).build()); - } - return new SearchHits(hits.toArray(new SearchHit[hits.size()]), new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 1.0f); - } - } - - private static class TestIterator extends BatchedDataIterator> { - TestIterator(Client client, String jobId) { - super(client, jobId); - } - - @Override - protected QueryBuilder getQuery() { - return QueryBuilders.matchAllQuery(); - } - - @Override - protected String map(SearchHit hit) { - return hit.getSourceAsString(); - } - - @Override - protected Deque getCollection() { - return new ArrayDeque<>(); - } - - @Override - protected SortOrder sortOrder() { - return SortOrder.DESC; - } - - @Override - protected String sortField() { - return "foo"; - } - } - public class SearchHitBuilder { - - private final SearchHit hit; - private final Map fields; - - public SearchHitBuilder(int docId) { - hit = new SearchHit(docId); - fields = new HashMap<>(); - } - - public SearchHitBuilder setSource(String sourceJson) { - hit.sourceRef(new BytesArray(sourceJson)); - return this; - } - - public SearchHit build() { - if (!fields.isEmpty()) { - hit.fields(fields); - } - return hit; - } - } -} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index 56f320d4cb4..23a28e14a86 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -118,3 +118,64 @@ teardown: transform_id: "airline-transform-start-stop" - match: { stopped: true } +--- +"Test start/stop only starts/stops specified transform": + - do: + data_frame.put_data_frame_transform: + transform_id: "airline-transform-start-later" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-start-later" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + - do: + data_frame.start_data_frame_transform: + transform_id: "airline-transform-start-stop" + - match: { started: true } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "airline-transform-start-stop" + - match: { count: 1 } + - match: { transforms.0.id: "airline-transform-start-stop" } + - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.task_state: "started" } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "airline-transform-start-later" + - match: { count: 1 } + - match: { transforms.0.id: "airline-transform-start-later" } + - match: { transforms.0.state.indexer_state: "stopped" } + - match: { transforms.0.state.task_state: "stopped" } + + - do: + data_frame.start_data_frame_transform: + transform_id: "airline-transform-start-later" + - match: { started: true } + + - do: + data_frame.stop_data_frame_transform: + transform_id: "airline-transform-start-stop" + - match: { stopped: true } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "airline-transform-start-later" + - match: { count: 1 } + - match: { transforms.0.id: "airline-transform-start-later" } + - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.task_state: "started" } + + - do: + 
data_frame.stop_data_frame_transform: + transform_id: "airline-transform-start-later" + - match: { stopped: true } + + - do: + data_frame.delete_data_frame_transform: + transform_id: "airline-transform-start-later" From eb656244bf4ecd195a4e7083f58335a3e6c4bed8 Mon Sep 17 00:00:00 2001 From: Brandon Kobel Date: Fri, 5 Apr 2019 07:43:20 -0700 Subject: [PATCH 05/45] Add Kibana application privileges for monitoring and ml reserved roles (#40651) * Add Kibana application privileges for monitoring and ml reserved roles * Adding test for kibana-.kibana application explicitly * Whoa there, fat fingered kibana and application... * And I copied something from monitoring I shouldn't have... * And actually doing what Yogesh recommended... --- .../authz/store/ReservedRolesStore.java | 20 ++++++++-- .../authz/store/ReservedRolesStoreTests.java | 37 +++++++++++++++++++ 2 files changed, 53 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index d2745e4ef8e..d9fded1fb2b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -57,8 +57,12 @@ public class ReservedRolesStore implements BiConsumer, ActionListene new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices(".monitoring-*").privileges("read", "read_cross_cluster").build() - }, - null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + }, + new RoleDescriptor.ApplicationResourcePrivileges[] { + RoleDescriptor.ApplicationResourcePrivileges.builder() + .application("kibana-*").resources("*").privileges("reserved_monitoring").build() + }, + null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("remote_monitoring_agent", new RoleDescriptor("remote_monitoring_agent", new String[] { "manage_index_templates", "manage_ingest_pipelines", "monitor", @@ -146,7 +150,11 @@ public class ReservedRolesStore implements BiConsumer, ActionListene RoleDescriptor.IndicesPrivileges.builder().indices(".ml-annotations*") .privileges("view_index_metadata", "read", "write").build() }, - null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + new RoleDescriptor.ApplicationResourcePrivileges[] { + RoleDescriptor.ApplicationResourcePrivileges.builder() + .application("kibana-*").resources("*").privileges("reserved_ml").build() + }, + null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("machine_learning_admin", new RoleDescriptor("machine_learning_admin", new String[] { "manage_ml" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() @@ -155,7 +163,11 @@ public class ReservedRolesStore implements BiConsumer, ActionListene RoleDescriptor.IndicesPrivileges.builder().indices(".ml-annotations*") .privileges("view_index_metadata", "read", "write").build() }, - null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + new RoleDescriptor.ApplicationResourcePrivileges[] { + RoleDescriptor.ApplicationResourcePrivileges.builder() + .application("kibana-*").resources("*").privileges("reserved_ml").build() + }, + null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("data_frame_transforms_admin", new RoleDescriptor("data_frame_transforms_admin", new String[] { "manage_data_frame_transforms" }, new 
RoleDescriptor.IndicesPrivileges[]{ diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 9d970cca551..c4c2ec871a5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -452,6 +452,18 @@ public class ReservedRolesStoreTests extends ESTestCase { assertThat(monitoringUserRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(index), is(true)); assertNoAccessAllowed(monitoringUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); + + final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); + assertThat(monitoringUserRole.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-foo", "foo"), "*"), is(false)); + assertThat(monitoringUserRole.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-reserved_monitoring", "reserved_monitoring"), "*"), is(true)); + + final String otherApplication = "logstash-" + randomAlphaOfLengthBetween(8, 24); + assertThat(monitoringUserRole.application().grants( + new ApplicationPrivilege(otherApplication, "app-foo", "foo"), "*"), is(false)); + assertThat(monitoringUserRole.application().grants( + new ApplicationPrivilege(otherApplication, "app-reserved_monitoring", "reserved_monitoring"), "*"), is(false)); } public void testRemoteMonitoringAgentRole() { @@ -957,6 +969,18 @@ public class ReservedRolesStoreTests extends ESTestCase { assertReadWriteDocsButNotDeleteIndexAllowed(role, AnnotationIndex.INDEX_NAME); assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + + final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-reserved_ml", "reserved_ml"), "*"), is(true)); + + final String otherApplication = "logstash-" + randomAlphaOfLengthBetween(8, 24); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-reserved_ml", "reserved_ml"), "*"), is(false)); } public void testMachineLearningUserRole() { @@ -1028,6 +1052,19 @@ public class ReservedRolesStoreTests extends ESTestCase { assertReadWriteDocsButNotDeleteIndexAllowed(role, AnnotationIndex.INDEX_NAME); assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + + + final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-reserved_ml", "reserved_ml"), "*"), is(true)); + + final String otherApplication = "logstash-" + randomAlphaOfLengthBetween(8, 24); + assertThat(role.application().grants( + new 
ApplicationPrivilege(otherApplication, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-reserved_ml", "reserved_ml"), "*"), is(false)); } public void testDataFrameTransformsAdminRole() { From 4452e8e10f716931eab3028ca2973610318904ae Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 4 Apr 2019 18:16:11 +0100 Subject: [PATCH 06/45] Mutes GatewayIndexStateIT.testRecoverBrokenIndexMetadata --- .../test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index f431113183f..fa6469037b0 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -340,6 +340,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase { * allocated in our metadata that we recover. In that case we now have the ability to check the index on local recovery from disk * if it is sane and if we can successfully create an IndexService. This also includes plugins etc. */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40867") public void testRecoverBrokenIndexMetadata() throws Exception { logger.info("--> starting one node"); internalCluster().startNode(); From a8dbb07546034313d155618055cd8c8728bd5a6a Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 5 Apr 2019 11:34:20 -0500 Subject: [PATCH 07/45] [ML] Changes default destination index field mapping and adds scripted_metric agg (#40750) (#40846) * [ML] Allowing destination index mappings to have dynamic types, adds script_metric agg * Making dynamic|source mapping explicit --- .../integration/DataFramePivotRestIT.java | 53 +++++++++++ ...nsportPreviewDataFrameTransformAction.java | 1 - .../pivot/AggregationResultUtils.java | 3 + .../transforms/pivot/Aggregations.java | 18 +++- .../transforms/pivot/SchemaUtil.java | 13 ++- .../pivot/AggregationResultUtilsTests.java | 89 +++++++++++++++++++ .../transforms/pivot/AggregationsTests.java | 22 +++++ .../transforms/pivot/PivotTests.java | 16 ++-- 8 files changed, 200 insertions(+), 15 deletions(-) diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 95daf11f674..0d14851aa7c 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -314,6 +314,59 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { assertThat(actual, containsString("2017-01-15T")); } + public void testPivotWithScriptedMetricAgg() throws Exception { + String transformId = "scriptedMetricPivot"; + String dataFrameIndex = "scripted_metric_pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + + final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, + BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + + String config = "{" + + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME 
+ "\"}," + + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + config += " \"pivot\": {" + + " \"group_by\": {" + + " \"reviewer\": {" + + " \"terms\": {" + + " \"field\": \"user_id\"" + + " } } }," + + " \"aggregations\": {" + + " \"avg_rating\": {" + + " \"avg\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"squared_sum\": {" + + " \"scripted_metric\": {" + + " \"init_script\": \"state.reviews_sqrd = []\"," + + " \"map_script\": \"state.reviews_sqrd.add(doc.stars.value * doc.stars.value)\"," + + " \"combine_script\": \"state.reviews_sqrd\"," + + " \"reduce_script\": \"def sum = 0.0; for(l in states){ for(a in l) { sum += a}} return sum\"" + + " } }" + + " } }" + + "}"; + + createDataframeTransformRequest.setJsonEntity(config); + Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); + assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + assertTrue(indexExists(dataFrameIndex)); + + startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + + // we expect 27 documents as there shall be 27 user_id's + Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); + + // get and check some users + Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); + Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); + assertEquals(3.878048780, actual.doubleValue(), 0.000001); + actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.squared_sum", searchResult)).get(0); + assertEquals(711.0, actual.doubleValue(), 0.000001); + } + private void assertOnePivotValue(String query, double expected) throws IOException { Map searchResult = getAsMap(query); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java index 63b2ed720c0..2a4ba47f507 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java @@ -95,6 +95,5 @@ public class TransportPreviewDataFrameTransformAction extends }, listener::onFailure )); - } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java index 5d77f82e610..574afd4f2fd 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java @@ -13,6 +13,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation.SingleValue; 
+import org.elasticsearch.search.aggregations.metrics.ScriptedMetric; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; @@ -73,6 +74,8 @@ final class AggregationResultUtils { } else { document.put(aggName, aggResultSingleValue.getValueAsString()); } + } else if (aggResult instanceof ScriptedMetric) { + document.put(aggName, ((ScriptedMetric) aggResult).aggregation()); } else { // Execution should never reach this point! // Creating transforms with unsupported aggregations shall not be possible diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java index 555deae3674..39b139314d4 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java @@ -12,6 +12,11 @@ import java.util.stream.Collectors; import java.util.stream.Stream; public final class Aggregations { + + // the field mapping should not explicitly be set and allow ES to dynamically determine mapping via the data. + private static final String DYNAMIC = "_dynamic"; + // the field mapping should be determined explicitly from the source field mapping if possible. + private static final String SOURCE = "_source"; private Aggregations() {} /** @@ -27,9 +32,10 @@ public final class Aggregations { AVG("avg", "double"), CARDINALITY("cardinality", "long"), VALUE_COUNT("value_count", "long"), - MAX("max", null), - MIN("min", null), - SUM("sum", null); + MAX("max", SOURCE), + MIN("min", SOURCE), + SUM("sum", SOURCE), + SCRIPTED_METRIC("scripted_metric", DYNAMIC); private final String aggregationType; private final String targetMapping; @@ -55,8 +61,12 @@ public final class Aggregations { return aggregationSupported.contains(aggregationType.toUpperCase(Locale.ROOT)); } + public static boolean isDynamicMapping(String targetMapping) { + return DYNAMIC.equals(targetMapping); + } + public static String resolveTargetMapping(String aggregationType, String sourceType) { AggregationType agg = AggregationType.valueOf(aggregationType.toUpperCase(Locale.ROOT)); - return agg.getTargetMapping() == null ? sourceType : agg.getTargetMapping(); + return agg.getTargetMapping().equals(SOURCE) ? 
sourceType : agg.getTargetMapping(); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java index ff967213e81..deb4afdb73d 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRespon import org.elasticsearch.client.Client; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; @@ -75,6 +76,8 @@ public final class SchemaUtil { ValuesSourceAggregationBuilder valueSourceAggregation = (ValuesSourceAggregationBuilder) agg; aggregationSourceFieldNames.put(valueSourceAggregation.getName(), valueSourceAggregation.field()); aggregationTypes.put(valueSourceAggregation.getName(), valueSourceAggregation.getType()); + } else if(agg instanceof ScriptedMetricAggregationBuilder) { + aggregationTypes.put(agg.getName(), agg.getType()); } else { // execution should not reach this point listener.onFailure(new RuntimeException("Unsupported aggregation type [" + agg.getType() + "]")); @@ -127,15 +130,17 @@ public final class SchemaUtil { aggregationTypes.forEach((targetFieldName, aggregationName) -> { String sourceFieldName = aggregationSourceFieldNames.get(targetFieldName); - String destinationMapping = Aggregations.resolveTargetMapping(aggregationName, sourceMappings.get(sourceFieldName)); + String sourceMapping = sourceFieldName == null ? 
null : sourceMappings.get(sourceFieldName); + String destinationMapping = Aggregations.resolveTargetMapping(aggregationName, sourceMapping); logger.debug( "Deduced mapping for: [" + targetFieldName + "], agg type [" + aggregationName + "] to [" + destinationMapping + "]"); - if (destinationMapping != null) { + if (Aggregations.isDynamicMapping(destinationMapping)) { + logger.info("Dynamic target mapping set for field ["+ targetFieldName +"] and aggregation [" + aggregationName +"]"); + } else if (destinationMapping != null) { targetMapping.put(targetFieldName, destinationMapping); } else { - logger.warn("Failed to deduce mapping for [" + targetFieldName + "], fall back to double."); - targetMapping.put(targetFieldName, "double"); + logger.warn("Failed to deduce mapping for [" + targetFieldName + "], fall back to dynamic mapping."); } }); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java index c2c22dc6ffa..62a4de353bc 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java @@ -35,9 +35,11 @@ import org.elasticsearch.search.aggregations.metrics.ParsedCardinality; import org.elasticsearch.search.aggregations.metrics.ParsedExtendedStats; import org.elasticsearch.search.aggregations.metrics.ParsedMax; import org.elasticsearch.search.aggregations.metrics.ParsedMin; +import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric; import org.elasticsearch.search.aggregations.metrics.ParsedStats; import org.elasticsearch.search.aggregations.metrics.ParsedSum; import org.elasticsearch.search.aggregations.metrics.ParsedValueCount; +import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; @@ -76,6 +78,7 @@ public class AggregationResultUtilsTests extends ESTestCase { map.put(MaxAggregationBuilder.NAME, (p, c) -> ParsedMax.fromXContent(p, (String) c)); map.put(SumAggregationBuilder.NAME, (p, c) -> ParsedSum.fromXContent(p, (String) c)); map.put(AvgAggregationBuilder.NAME, (p, c) -> ParsedAvg.fromXContent(p, (String) c)); + map.put(ScriptedMetricAggregationBuilder.NAME, (p, c) -> ParsedScriptedMetric.fromXContent(p, (String) c)); map.put(ValueCountAggregationBuilder.NAME, (p, c) -> ParsedValueCount.fromXContent(p, (String) c)); map.put(StatsAggregationBuilder.NAME, (p, c) -> ParsedStats.fromXContent(p, (String) c)); map.put(StatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c)); @@ -409,6 +412,92 @@ public class AggregationResultUtilsTests extends ESTestCase { executeTest(groupBy, aggregationBuilders, input, fieldTypeMap, expected, 10); } + public void testExtractCompositeAggregationResultsWithDynamicType() throws IOException { + String targetField = randomAlphaOfLengthBetween(5, 10); + String targetField2 = randomAlphaOfLengthBetween(5, 10) + "_2"; + + GroupConfig groupBy = parseGroupConfig("{" + + "\"" + targetField + "\" : {" + + " \"terms\" : {" + + " \"field\" : 
\"doesn't_matter_for_this_test\"" + + " } }," + + "\"" + targetField2 + "\" : {" + + " \"terms\" : {" + + " \"field\" : \"doesn't_matter_for_this_test\"" + + " } }" + + "}"); + + String aggName = randomAlphaOfLengthBetween(5, 10); + String aggTypedName = "scripted_metric#" + aggName; + + Collection aggregationBuilders = asList(AggregationBuilders.scriptedMetric(aggName)); + + Map input = asMap( + "buckets", + asList( + asMap( + KEY, asMap( + targetField, "ID1", + targetField2, "ID1_2" + ), + aggTypedName, asMap( + "value", asMap("field", 123.0)), + DOC_COUNT, 1), + asMap( + KEY, asMap( + targetField, "ID1", + targetField2, "ID2_2" + ), + aggTypedName, asMap( + "value", asMap("field", 1.0)), + DOC_COUNT, 2), + asMap( + KEY, asMap( + targetField, "ID2", + targetField2, "ID1_2" + ), + aggTypedName, asMap( + "value", asMap("field", 2.13)), + DOC_COUNT, 3), + asMap( + KEY, asMap( + targetField, "ID3", + targetField2, "ID2_2" + ), + aggTypedName, asMap( + "value", asMap("field", 12.0)), + DOC_COUNT, 4) + )); + + List> expected = asList( + asMap( + targetField, "ID1", + targetField2, "ID1_2", + aggName, asMap("field", 123.0) + ), + asMap( + targetField, "ID1", + targetField2, "ID2_2", + aggName, asMap("field", 1.0) + ), + asMap( + targetField, "ID2", + targetField2, "ID1_2", + aggName, asMap("field", 2.13) + ), + asMap( + targetField, "ID3", + targetField2, "ID2_2", + aggName, asMap("field", 12.0) + ) + ); + Map fieldTypeMap = asStringMap( + targetField, "keyword", + targetField2, "keyword" + ); + executeTest(groupBy, aggregationBuilders, input, fieldTypeMap, expected, 10); + } + public void testExtractCompositeAggregationResultsDocIDs() throws IOException { String targetField = randomAlphaOfLengthBetween(5, 10); String targetField2 = randomAlphaOfLengthBetween(5, 10) + "_2"; diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java index 23720ab6af3..47476baebdd 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java @@ -15,9 +15,31 @@ public class AggregationsTests extends ESTestCase { assertEquals("double", Aggregations.resolveTargetMapping("avg", "int")); assertEquals("double", Aggregations.resolveTargetMapping("avg", "double")); + // cardinality + assertEquals("long", Aggregations.resolveTargetMapping("cardinality", "int")); + assertEquals("long", Aggregations.resolveTargetMapping("cardinality", "double")); + + // value_count + assertEquals("long", Aggregations.resolveTargetMapping("value_count", "int")); + assertEquals("long", Aggregations.resolveTargetMapping("value_count", "double")); + // max assertEquals("int", Aggregations.resolveTargetMapping("max", "int")); assertEquals("double", Aggregations.resolveTargetMapping("max", "double")); assertEquals("half_float", Aggregations.resolveTargetMapping("max", "half_float")); + + // min + assertEquals("int", Aggregations.resolveTargetMapping("min", "int")); + assertEquals("double", Aggregations.resolveTargetMapping("min", "double")); + assertEquals("half_float", Aggregations.resolveTargetMapping("min", "half_float")); + + // sum + assertEquals("int", Aggregations.resolveTargetMapping("sum", "int")); + assertEquals("double", Aggregations.resolveTargetMapping("sum", "double")); + 
assertEquals("half_float", Aggregations.resolveTargetMapping("sum", "half_float")); + + // scripted_metric + assertEquals("_dynamic", Aggregations.resolveTargetMapping("scripted_metric", null)); + assertEquals("_dynamic", Aggregations.resolveTargetMapping("scripted_metric", "int")); } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java index c39e9a2589f..be23f515ac8 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java @@ -37,9 +37,7 @@ import org.junit.Before; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -176,14 +174,20 @@ public class PivotTests extends ESTestCase { } private AggregationConfig getAggregationConfig(String agg) throws IOException { + if (agg.equals(AggregationType.SCRIPTED_METRIC.getName())) { + return parseAggregations("{\"pivot_scripted_metric\": {\n" + + "\"scripted_metric\": {\n" + + " \"init_script\" : \"state.transactions = []\",\n" + + " \"map_script\" : \"state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)\", \n" + + " \"combine_script\" : \"double profit = 0; for (t in state.transactions) { profit += t } return profit\",\n" + + " \"reduce_script\" : \"double profit = 0; for (a in states) { profit += a } return profit\"\n" + + " }\n" + + "}}"); + } return parseAggregations("{\n" + " \"pivot_" + agg + "\": {\n" + " \"" + agg + "\": {\n" + " \"field\": \"values\"\n" + " }\n" + " }" + "}"); } - private Map getFieldMappings() { - return Collections.singletonMap("values", "double"); - } - private AggregationConfig parseAggregations(String json) throws IOException { final XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); From 2ff19bc1b7adc2e886ebf88f18f227f7e0e33562 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 5 Apr 2019 19:10:10 +0100 Subject: [PATCH 08/45] Use Writeable for TransportReplAction derivatives (#40905) Relates #34389, backport of #40894. 
--- .../elasticsearch/action/DocWriteRequest.java | 8 +- ...TransportVerifyShardBeforeCloseAction.java | 11 +-- .../admin/indices/flush/FlushRequest.java | 10 ++- .../indices/flush/ShardFlushRequest.java | 9 +- .../flush/TransportShardFlushAction.java | 2 +- .../admin/indices/refresh/RefreshRequest.java | 7 ++ .../action/bulk/BulkShardRequest.java | 21 ++--- .../TransportSingleItemBulkWriteAction.java | 7 +- .../action/delete/DeleteRequest.java | 54 ++++++------ .../action/index/IndexRequest.java | 82 +++++++++---------- .../resync/ResyncReplicationRequest.java | 54 ++++++------ .../support/broadcast/BroadcastRequest.java | 6 ++ .../replication/BasicReplicationRequest.java | 10 ++- .../replication/ReplicatedWriteRequest.java | 12 +-- .../replication/ReplicationRequest.java | 50 ++++------- .../TransportBroadcastReplicationAction.java | 7 +- .../TransportReplicationAction.java | 78 ++++++++---------- .../replication/TransportWriteAction.java | 6 +- .../action/update/UpdateRequest.java | 6 +- .../index/reindex/ReindexRequest.java | 3 +- .../seqno/GlobalCheckpointSyncAction.java | 5 +- .../RetentionLeaseBackgroundSyncAction.java | 10 +-- .../index/seqno/RetentionLeaseSyncAction.java | 10 +-- .../action/index/IndexRequestTests.java | 13 ++- .../resync/ResyncReplicationRequestTests.java | 3 +- .../BroadcastReplicationTests.java | 17 ++-- .../ReplicationOperationTests.java | 6 +- .../TransportReplicationActionTests.java | 35 ++++---- ...ReplicationAllPermitsAcquisitionTests.java | 10 ++- .../TransportWriteActionTests.java | 8 +- .../ESIndexLevelReplicationTestCase.java | 6 +- .../bulk/BulkShardOperationsRequest.java | 19 +++-- 32 files changed, 297 insertions(+), 288 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index 373dfaa5c74..61328a78df6 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -222,13 +222,9 @@ public interface DocWriteRequest extends IndicesRequest { byte type = in.readByte(); DocWriteRequest docWriteRequest; if (type == 0) { - IndexRequest indexRequest = new IndexRequest(); - indexRequest.readFrom(in); - docWriteRequest = indexRequest; + docWriteRequest = new IndexRequest(in); } else if (type == 1) { - DeleteRequest deleteRequest = new DeleteRequest(); - deleteRequest.readFrom(in); - docWriteRequest = deleteRequest; + docWriteRequest = new DeleteRequest(in); } else if (type == 2) { UpdateRequest updateRequest = new UpdateRequest(); updateRequest.readFrom(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index cba01a3e9f8..e0cddcb0acf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -136,9 +136,11 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA public static class ShardRequest extends ReplicationRequest { - private ClusterBlock clusterBlock; + private final ClusterBlock clusterBlock; - ShardRequest(){ + ShardRequest(StreamInput in) throws IOException { + super(in); + clusterBlock = new ClusterBlock(in); } public ShardRequest(final ShardId shardId, final 
ClusterBlock clusterBlock, final TaskId parentTaskId) { @@ -153,9 +155,8 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA } @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - clusterBlock = new ClusterBlock(in); + public void readFrom(final StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java index 7f31890339c..a6a72b92ce7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java @@ -52,6 +52,12 @@ public class FlushRequest extends BroadcastRequest { super(indices); } + public FlushRequest(StreamInput in) throws IOException { + super(in); + force = in.readBoolean(); + waitIfOngoing = in.readBoolean(); + } + /** * Returns {@code true} iff a flush should block * if a another flush operation is already running. Otherwise {@code false} @@ -103,9 +109,7 @@ public class FlushRequest extends BroadcastRequest { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - force = in.readBoolean(); - waitIfOngoing = in.readBoolean(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java index ac32b16eb57..8bd3597eba9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java @@ -29,7 +29,7 @@ import java.io.IOException; public class ShardFlushRequest extends ReplicationRequest { - private FlushRequest request = new FlushRequest(); + private final FlushRequest request; public ShardFlushRequest(FlushRequest request, ShardId shardId) { super(shardId); @@ -37,7 +37,9 @@ public class ShardFlushRequest extends ReplicationRequest { this.waitForActiveShards = ActiveShardCount.NONE; // don't wait for any active shards before proceeding, by default } - public ShardFlushRequest() { + public ShardFlushRequest(StreamInput in) throws IOException { + super(in); + request = new FlushRequest(in); } FlushRequest getRequest() { @@ -46,8 +48,7 @@ public class ShardFlushRequest extends ReplicationRequest { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request.readFrom(in); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 344a817fa8b..63424844d7d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -55,7 +55,7 @@ public class TransportShardFlushAction IndexShard primary) { primary.flush(shardRequest.getRequest()); logger.trace("{} flush request executed on primary", 
primary.shardId()); - return new PrimaryResult(shardRequest, new ReplicationResponse()); + return new PrimaryResult<>(shardRequest, new ReplicationResponse()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java index 20687b8e534..991184508fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java @@ -20,6 +20,9 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; /** * A refresh request making all operations performed since the last refresh available for search. The (near) real-time @@ -35,4 +38,8 @@ public class RefreshRequest extends BroadcastRequest { public RefreshRequest(String... indices) { super(indices); } + + public RefreshRequest(StreamInput in) throws IOException { + super(in); + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 2f9a130eb82..1fe763b48fd 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -33,7 +33,14 @@ public class BulkShardRequest extends ReplicatedWriteRequest { private BulkItemRequest[] items; - public BulkShardRequest() { + public BulkShardRequest(StreamInput in) throws IOException { + super(in); + items = new BulkItemRequest[in.readVInt()]; + for (int i = 0; i < items.length; i++) { + if (in.readBoolean()) { + items[i] = BulkItemRequest.readBulkItem(in); + } + } } public BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) { @@ -60,7 +67,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest { indices.add(item.index()); } } - return indices.toArray(new String[indices.size()]); + return indices.toArray(new String[0]); } @Override @@ -78,14 +85,8 @@ public class BulkShardRequest extends ReplicatedWriteRequest { } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - items = new BulkItemRequest[in.readVInt()]; - for (int i = 0; i < items.length; i++) { - if (in.readBoolean()) { - items[i] = BulkItemRequest.readBulkItem(in); - } - } + public void readFrom(StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java index cc97b6237e3..c080006b19d 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java @@ -28,11 +28,10 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import 
org.elasticsearch.transport.TransportService; -import java.util.function.Supplier; - /** use transport bulk action directly */ @Deprecated public abstract class TransportSingleItemBulkWriteAction< @@ -43,8 +42,8 @@ public abstract class TransportSingleItemBulkWriteAction< private final TransportBulkAction bulkAction; protected TransportSingleItemBulkWriteAction(String actionName, TransportService transportService, ActionFilters actionFilters, - Supplier request, TransportBulkAction bulkAction) { - super(actionName, transportService, actionFilters, request); + Writeable.Reader requestReader, TransportBulkAction bulkAction) { + super(actionName, transportService, actionFilters, requestReader); this.bulkAction = bulkAction; } diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index a033bf3cb00..115a7bdd742 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -53,6 +53,8 @@ import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; public class DeleteRequest extends ReplicatedWriteRequest implements DocWriteRequest, CompositeIndicesRequest { + private static final ShardId NO_SHARD_ID = null; + // Set to null initially so we can know to override in bulk requests that have a default type. private String type; private String id; @@ -63,7 +65,27 @@ public class DeleteRequest extends ReplicatedWriteRequest private long ifSeqNo = UNASSIGNED_SEQ_NO; private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; + public DeleteRequest(StreamInput in) throws IOException { + super(in); + type = in.readString(); + id = in.readString(); + routing = in.readOptionalString(); + if (in.getVersion().before(Version.V_7_0_0)) { + in.readOptionalString(); // _parent + } + version = in.readLong(); + versionType = VersionType.fromValue(in.readByte()); + if (in.getVersion().onOrAfter(Version.V_6_6_0)) { + ifSeqNo = in.readZLong(); + ifPrimaryTerm = in.readVLong(); + } else { + ifSeqNo = UNASSIGNED_SEQ_NO; + ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; + } + } + public DeleteRequest() { + super(NO_SHARD_ID); } /** @@ -71,6 +93,7 @@ public class DeleteRequest extends ReplicatedWriteRequest * must be set. 
*/ public DeleteRequest(String index) { + super(NO_SHARD_ID); this.index = index; } @@ -85,6 +108,7 @@ public class DeleteRequest extends ReplicatedWriteRequest */ @Deprecated public DeleteRequest(String index, String type, String id) { + super(NO_SHARD_ID); this.index = index; this.type = type; this.id = id; @@ -97,6 +121,7 @@ public class DeleteRequest extends ReplicatedWriteRequest * @param id The id of the document */ public DeleteRequest(String index, String id) { + super(NO_SHARD_ID); this.index = index; this.id = id; } @@ -274,23 +299,8 @@ public class DeleteRequest extends ReplicatedWriteRequest } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - type = in.readString(); - id = in.readString(); - routing = in.readOptionalString(); - if (in.getVersion().before(Version.V_7_0_0)) { - in.readOptionalString(); // _parent - } - version = in.readLong(); - versionType = VersionType.fromValue(in.readByte()); - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - ifSeqNo = in.readZLong(); - ifPrimaryTerm = in.readVLong(); - } else { - ifSeqNo = UNASSIGNED_SEQ_NO; - ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; - } + public void readFrom(StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -321,14 +331,4 @@ public class DeleteRequest extends ReplicatedWriteRequest public String toString() { return "delete {[" + index + "][" + type() + "][" + id + "]}"; } - - /** - * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't - * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or - * use because the DeleteRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set. - */ - @Override - public DeleteRequest setShardId(ShardId shardId) { - throw new UnsupportedOperationException("shard id should never be set on DeleteRequest"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 37d96083177..6d26eccca67 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -83,6 +83,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement */ static final int MAX_SOURCE_LENGTH_IN_TOSTRING = 2048; + private static final ShardId NO_SHARD_ID = null; + // Set to null initially so we can know to override in bulk requests that have a default type. 
private String type; private String id; @@ -112,8 +114,41 @@ public class IndexRequest extends ReplicatedWriteRequest implement private long ifSeqNo = UNASSIGNED_SEQ_NO; private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; + public IndexRequest(StreamInput in) throws IOException { + super(in); + type = in.readOptionalString(); + id = in.readOptionalString(); + routing = in.readOptionalString(); + if (in.getVersion().before(Version.V_7_0_0)) { + in.readOptionalString(); // _parent + } + if (in.getVersion().before(Version.V_6_0_0_alpha1)) { + in.readOptionalString(); // timestamp + in.readOptionalTimeValue(); // ttl + } + source = in.readBytesReference(); + opType = OpType.fromId(in.readByte()); + version = in.readLong(); + versionType = VersionType.fromValue(in.readByte()); + pipeline = in.readOptionalString(); + isRetry = in.readBoolean(); + autoGeneratedTimestamp = in.readLong(); + if (in.readBoolean()) { + contentType = in.readEnum(XContentType.class); + } else { + contentType = null; + } + if (in.getVersion().onOrAfter(Version.V_6_6_0)) { + ifSeqNo = in.readZLong(); + ifPrimaryTerm = in.readVLong(); + } else { + ifSeqNo = UNASSIGNED_SEQ_NO; + ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; + } + } public IndexRequest() { + super(NO_SHARD_ID); } /** @@ -121,6 +156,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement * {@link #source(byte[], XContentType)} must be set. */ public IndexRequest(String index) { + super(NO_SHARD_ID); this.index = index; } @@ -131,6 +167,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement */ @Deprecated public IndexRequest(String index, String type) { + super(NO_SHARD_ID); this.index = index; this.type = type; } @@ -146,6 +183,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement */ @Deprecated public IndexRequest(String index, String type, String id) { + super(NO_SHARD_ID); this.index = index; this.type = type; this.id = id; @@ -593,37 +631,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - type = in.readOptionalString(); - id = in.readOptionalString(); - routing = in.readOptionalString(); - if (in.getVersion().before(Version.V_7_0_0)) { - in.readOptionalString(); // _parent - } - if (in.getVersion().before(Version.V_6_0_0_alpha1)) { - in.readOptionalString(); // timestamp - in.readOptionalTimeValue(); // ttl - } - source = in.readBytesReference(); - opType = OpType.fromId(in.readByte()); - version = in.readLong(); - versionType = VersionType.fromValue(in.readByte()); - pipeline = in.readOptionalString(); - isRetry = in.readBoolean(); - autoGeneratedTimestamp = in.readLong(); - if (in.readBoolean()) { - contentType = in.readEnum(XContentType.class); - } else { - contentType = null; - } - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - ifSeqNo = in.readZLong(); - ifPrimaryTerm = in.readVLong(); - } else { - ifSeqNo = UNASSIGNED_SEQ_NO; - ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; - } + public void readFrom(StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -704,15 +713,4 @@ public class IndexRequest extends ReplicatedWriteRequest implement public long getAutoGeneratedTimestamp() { return autoGeneratedTimestamp; } - - /** - * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't - * do). 
Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or - * use because the IndexRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set. - */ - @Override - public IndexRequest setShardId(ShardId shardId) { - throw new UnsupportedOperationException("shard id should never be set on IndexRequest"); - } - } diff --git a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java index a53766af7cf..de964a40ca4 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java @@ -36,12 +36,32 @@ import java.util.Objects; */ public final class ResyncReplicationRequest extends ReplicatedWriteRequest { - private long trimAboveSeqNo; - private Translog.Operation[] operations; - private long maxSeenAutoIdTimestampOnPrimary; + private final long trimAboveSeqNo; + private final Translog.Operation[] operations; + private final long maxSeenAutoIdTimestampOnPrimary; - ResyncReplicationRequest() { - super(); + ResyncReplicationRequest(StreamInput in) throws IOException { + super(in); + assert Version.CURRENT.major <= 7; + if (in.getVersion().equals(Version.V_6_0_0)) { + /* + * Resync replication request serialization was broken in 6.0.0 due to the elements of the stream not being prefixed with a + * byte indicating the type of the operation. + */ + // TODO: remove this check in 8.0.0 which provides no BWC guarantees with 6.x. + throw new IllegalStateException("resync replication request serialization is broken in 6.0.0"); + } + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + trimAboveSeqNo = in.readZLong(); + } else { + trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; + } + if (in.getVersion().onOrAfter(Version.V_6_5_0)) { + maxSeenAutoIdTimestampOnPrimary = in.readZLong(); + } else { + maxSeenAutoIdTimestampOnPrimary = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; + } + operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new); } public ResyncReplicationRequest(final ShardId shardId, final long trimAboveSeqNo, final long maxSeenAutoIdTimestampOnPrimary, @@ -65,28 +85,8 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest> extends public BroadcastRequest() { } + public BroadcastRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + protected BroadcastRequest(String[] indices) { this.indices = indices; } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/BasicReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/support/replication/BasicReplicationRequest.java index b4731d19e29..b70b6b2566d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/BasicReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/BasicReplicationRequest.java @@ -19,8 +19,11 @@ package org.elasticsearch.action.support.replication; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.shard.ShardId; +import java.io.IOException; + /** * A replication request that has no more information than ReplicationRequest. 
* Unfortunately ReplicationRequest can't be declared as a type parameter @@ -28,9 +31,6 @@ import org.elasticsearch.index.shard.ShardId; * instead. */ public class BasicReplicationRequest extends ReplicationRequest { - public BasicReplicationRequest() { - } - /** * Creates a new request with resolved shard id */ @@ -38,6 +38,10 @@ public class BasicReplicationRequest extends ReplicationRequest /** * Constructor for deserialization. */ - public ReplicatedWriteRequest() { + public ReplicatedWriteRequest(StreamInput in) throws IOException { + super(in); + refreshPolicy = RefreshPolicy.readFrom(in); } - public ReplicatedWriteRequest(ShardId shardId) { + public ReplicatedWriteRequest(@Nullable ShardId shardId) { super(shardId); } @@ -59,9 +62,8 @@ public abstract class ReplicatedWriteRequest } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - refreshPolicy = RefreshPolicy.readFrom(in); + public void readFrom(StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index db043238feb..31d18d4dc05 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -54,9 +54,9 @@ public abstract class ReplicationRequest request, ClusterService clusterService, + public TransportBroadcastReplicationAction(String name, Writeable.Reader requestReader, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportReplicationAction replicatedBroadcastShardAction) { - super(name, transportService, actionFilters, request); + super(name, transportService, actionFilters, requestReader); this.replicatedBroadcastShardAction = replicatedBroadcastShardAction; this.clusterService = clusterService; this.indexNameExpressionResolver = indexNameExpressionResolver; diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index ac6298c2c86..6df98fbf149 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -83,9 +83,6 @@ import java.io.IOException; import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Supplier; - -import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; /** * Base class for requests that should be executed on a primary copy followed by replica copies. 
@@ -120,10 +117,10 @@ public abstract class TransportReplicationAction< ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - Supplier replicaRequest, String executor) { + IndexNameExpressionResolver indexNameExpressionResolver, Writeable.Reader requestReader, + Writeable.Reader replicaRequestReader, String executor) { this(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, request, replicaRequest, executor, false, false); + indexNameExpressionResolver, requestReader, replicaRequestReader, executor, false, false); } @@ -131,8 +128,8 @@ public abstract class TransportReplicationAction< ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - Supplier replicaRequest, String executor, + IndexNameExpressionResolver indexNameExpressionResolver, Writeable.Reader requestReader, + Writeable.Reader replicaRequestReader, String executor, boolean syncGlobalCheckpointAfterOperation, boolean forceExecutionOnPrimary) { super(actionName, actionFilters, transportService.getTaskManager()); this.threadPool = threadPool; @@ -146,14 +143,14 @@ public abstract class TransportReplicationAction< this.transportPrimaryAction = actionName + "[p]"; this.transportReplicaAction = actionName + "[r]"; - transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, this::handleOperationRequest); + transportService.registerRequestHandler(actionName, ThreadPool.Names.SAME, requestReader, this::handleOperationRequest); - transportService.registerRequestHandler(transportPrimaryAction, - () -> new ConcreteShardRequest<>(request), executor, forceExecutionOnPrimary, true, this::handlePrimaryRequest); + transportService.registerRequestHandler(transportPrimaryAction, executor, forceExecutionOnPrimary, true, + in -> new ConcreteShardRequest<>(requestReader, in), this::handlePrimaryRequest); // we must never reject on because of thread pool capacity on replicas - transportService.registerRequestHandler(transportReplicaAction, () -> new ConcreteReplicaRequest<>(replicaRequest), - executor, true, true, this::handleReplicaRequest); + transportService.registerRequestHandler(transportReplicaAction, executor, true, true, + in -> new ConcreteReplicaRequest<>(replicaRequestReader, in), this::handleReplicaRequest); this.transportOptions = transportOptions(settings); @@ -1089,17 +1086,14 @@ public abstract class TransportReplicationAction< public static class ConcreteShardRequest extends TransportRequest { /** {@link AllocationId#getId()} of the shard this request is sent to **/ - private String targetAllocationID; + private final String targetAllocationID; + private final long primaryTerm; + private final R request; - private long primaryTerm; - - private R request; - - public ConcreteShardRequest(Supplier requestSupplier) { - request = requestSupplier.get(); - // null now, but will be populated by reading from the streams - targetAllocationID = null; - primaryTerm = UNASSIGNED_PRIMARY_TERM; + public ConcreteShardRequest(Writeable.Reader requestReader, StreamInput in) throws IOException { + targetAllocationID = in.readString(); + primaryTerm = in.readVLong(); + request = 
requestReader.read(in); } public ConcreteShardRequest(R request, String targetAllocationID, long primaryTerm) { @@ -1135,10 +1129,8 @@ public abstract class TransportReplicationAction< } @Override - public void readFrom(StreamInput in) throws IOException { - targetAllocationID = in.readString(); - primaryTerm = in.readVLong(); - request.readFrom(in); + public void readFrom(StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -1168,23 +1160,11 @@ public abstract class TransportReplicationAction< protected static final class ConcreteReplicaRequest extends ConcreteShardRequest { - private long globalCheckpoint; - private long maxSeqNoOfUpdatesOrDeletes; + private final long globalCheckpoint; + private final long maxSeqNoOfUpdatesOrDeletes; - public ConcreteReplicaRequest(final Supplier requestSupplier) { - super(requestSupplier); - } - - public ConcreteReplicaRequest(final R request, final String targetAllocationID, final long primaryTerm, - final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes) { - super(request, targetAllocationID, primaryTerm); - this.globalCheckpoint = globalCheckpoint; - this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public ConcreteReplicaRequest(Writeable.Reader requestReader, StreamInput in) throws IOException { + super(requestReader, in); if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { globalCheckpoint = in.readZLong(); } else { @@ -1199,6 +1179,18 @@ public abstract class TransportReplicationAction< } } + public ConcreteReplicaRequest(final R request, final String targetAllocationID, final long primaryTerm, + final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes) { + super(request, targetAllocationID, primaryTerm); + this.globalCheckpoint = globalCheckpoint; + this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes; + } + + @Override + public void readFrom(StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 619beab5793..cb3f67aa99e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.MapperParsingException; @@ -47,7 +48,6 @@ import org.elasticsearch.transport.TransportService; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Supplier; /** * Base class for transport actions that modify data in some shard like index, delete, and shardBulk. 
@@ -62,8 +62,8 @@ public abstract class TransportWriteAction< protected TransportWriteAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - Supplier replicaRequest, String executor, boolean forceExecutionOnPrimary) { + IndexNameExpressionResolver indexNameExpressionResolver, Writeable.Reader request, + Writeable.Reader replicaRequest, String executor, boolean forceExecutionOnPrimary) { super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, indexNameExpressionResolver, request, replicaRequest, executor, true, forceExecutionOnPrimary); } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 3693975ddab..bbd17ab4a72 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -845,8 +845,7 @@ public class UpdateRequest extends InstanceShardOperationRequest retryOnConflict = in.readVInt(); refreshPolicy = RefreshPolicy.readFrom(in); if (in.readBoolean()) { - doc = new IndexRequest(); - doc.readFrom(in); + doc = new IndexRequest(in); } if (in.getVersion().before(Version.V_7_0_0)) { String[] fields = in.readOptionalStringArray(); @@ -856,8 +855,7 @@ public class UpdateRequest extends InstanceShardOperationRequest } fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); if (in.readBoolean()) { - upsertRequest = new IndexRequest(); - upsertRequest.readFrom(in); + upsertRequest = new IndexRequest(in); } docAsUpsert = in.readBoolean(); if (in.getVersion().before(Version.V_7_0_0)) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index cd93356bb39..de171e88fbc 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -69,8 +69,7 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest { - private Request() { - super(); + private Request(StreamInput in) throws IOException { + super(in); } public Request(final ShardId shardId) { diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index d454c2de75b..918ce664aea 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -148,8 +148,9 @@ public class RetentionLeaseBackgroundSyncAction extends TransportReplicationActi return retentionLeases; } - public Request() { - + public Request(StreamInput in) throws IOException { + super(in); + retentionLeases = new RetentionLeases(in); } public Request(final ShardId shardId, final RetentionLeases retentionLeases) { @@ -159,9 +160,8 @@ public class RetentionLeaseBackgroundSyncAction extends TransportReplicationActi } @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - retentionLeases = new 
RetentionLeases(in); + public void readFrom(final StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index 26eb32a9f18..a8aa7fe6f8e 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -157,8 +157,9 @@ public class RetentionLeaseSyncAction extends return retentionLeases; } - public Request() { - + public Request(StreamInput in) throws IOException { + super(in); + retentionLeases = new RetentionLeases(in); } public Request(final ShardId shardId, final RetentionLeases retentionLeases) { @@ -168,9 +169,8 @@ public class RetentionLeaseSyncAction extends } @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - retentionLeases = new RetentionLeases(in); + public void readFrom(final StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index 3ab31b7d725..e01cc511ba1 100644 --- a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -163,8 +163,7 @@ public class IndexRequestTests extends ESTestCase { BytesStreamOutput out = new BytesStreamOutput(); indexRequest.writeTo(out); StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes); - IndexRequest serialized = new IndexRequest(); - serialized.readFrom(in); + IndexRequest serialized = new IndexRequest(in); assertEquals(XContentType.JSON, serialized.getContentType()); assertEquals(new BytesArray("{}"), serialized.source()); } @@ -173,14 +172,14 @@ public class IndexRequestTests extends ESTestCase { public void testSerializationOfEmptyRequestWorks() throws IOException { IndexRequest request = new IndexRequest("index"); assertNull(request.getContentType()); + assertEquals("index", request.index()); + try (BytesStreamOutput out = new BytesStreamOutput()) { request.writeTo(out); - try (StreamInput in = out.bytes().streamInput()) { - IndexRequest serialized = new IndexRequest(); - serialized.readFrom(in); - assertNull(request.getContentType()); - assertEquals("index", request.index()); + IndexRequest serialized = new IndexRequest(in); + assertNull(serialized.getContentType()); + assertEquals("index", serialized.index()); } } } diff --git a/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java b/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java index 230eccb0578..6aab44b722d 100644 --- a/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java @@ -44,8 +44,7 @@ public class ResyncReplicationRequestTests extends ESTestCase { before.writeTo(out); final StreamInput in = out.bytes().streamInput(); - final ResyncReplicationRequest after = new ResyncReplicationRequest(); - after.readFrom(in); + final ResyncReplicationRequest after = new ResyncReplicationRequest(in); assertThat(after, equalTo(before)); } 
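Aside (not part of the patch): the recurring change in these hunks is the move away from Streamable's mutable readFrom(StreamInput) towards Writeable-style deserialization — each request gains a constructor that reads from a StreamInput, its fields can become final, and the old readFrom override now only throws UnsupportedOperationException. A minimal sketch of the pattern, using a hypothetical ExampleRequest rather than any class touched by this patch:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

// Hypothetical request class illustrating the Writeable pattern: state is final and is
// rebuilt in a StreamInput constructor instead of via a post-construction readFrom() call.
public class ExampleRequest implements Writeable {
    private final String targetAllocationId;
    private final long primaryTerm;

    public ExampleRequest(String targetAllocationId, long primaryTerm) {
        this.targetAllocationId = targetAllocationId;
        this.primaryTerm = primaryTerm;
    }

    public ExampleRequest(StreamInput in) throws IOException { // replaces readFrom(StreamInput)
        targetAllocationId = in.readString();
        primaryTerm = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(targetAllocationId);
        out.writeVLong(primaryTerm);
    }
}
--------------------------------------------------

A round trip then looks like the updated tests in this patch: write the request to a BytesStreamOutput and reconstruct it with `new ExampleRequest(out.bytes().streamInput())` instead of calling readFrom on an empty instance.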
diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 4c91bfaa420..383b6ed304d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; @@ -118,13 +119,13 @@ public class BroadcastReplicationTests extends ESTestCase { threadPool = null; } - public void testNotStartedPrimary() throws InterruptedException, ExecutionException, IOException { + public void testNotStartedPrimary() throws InterruptedException, ExecutionException { final String index = "test"; setState(clusterService, state(index, randomBoolean(), randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); logger.debug("--> using initial state:\n{}", clusterService.state()); PlainActionFuture response = PlainActionFuture.newFuture(); - broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index), response); + broadcastReplicationAction.execute(new DummyBroadcastRequest(index), response); for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { if (randomBoolean()) { shardRequests.v2().onFailure(new NoShardAvailableActionException(shardRequests.v1())); @@ -138,13 +139,13 @@ public class BroadcastReplicationTests extends ESTestCase { assertBroadcastResponse(2, 0, 0, response.get(), null); } - public void testStartedPrimary() throws InterruptedException, ExecutionException, IOException { + public void testStartedPrimary() throws InterruptedException, ExecutionException { final String index = "test"; setState(clusterService, state(index, randomBoolean(), ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state()); PlainActionFuture response = PlainActionFuture.newFuture(); - broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index), response); + broadcastReplicationAction.execute(new DummyBroadcastRequest(index), response); for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { ReplicationResponse replicationResponse = new ReplicationResponse(); replicationResponse.setShardInfo(new ReplicationResponse.ShardInfo(1, 1)); @@ -225,7 +226,7 @@ public class BroadcastReplicationTests extends ESTestCase { @Override protected BasicReplicationRequest newShardRequest(DummyBroadcastRequest request, ShardId shardId) { - return new BasicReplicationRequest().setShardId(shardId); + return new BasicReplicationRequest(shardId); } @Override @@ -269,6 +270,12 @@ public class BroadcastReplicationTests extends ESTestCase { } public static class DummyBroadcastRequest extends BroadcastRequest { + DummyBroadcastRequest(String... 
indices) { + super(indices); + } + DummyBroadcastRequest(StreamInput in) throws IOException { + super(in); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 02988e7981a..3af5047fe22 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -440,12 +440,8 @@ public class ReplicationOperationTests extends ESTestCase { public AtomicBoolean processedOnPrimary = new AtomicBoolean(); public Set processedOnReplicas = ConcurrentCollections.newConcurrentSet(); - public Request() { - } - Request(ShardId shardId) { - this(); - this.shardId = shardId; + super(shardId); this.index = shardId.getIndexName(); this.waitForActiveShards = ActiveShardCount.NONE; // keep things simple diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 02e9ff3146c..a663841ac6a 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -130,6 +130,8 @@ import static org.mockito.Mockito.when; public class TransportReplicationActionTests extends ESTestCase { + private static final ShardId NO_SHARD_ID = null; + /** * takes a request that was sent by a {@link TransportReplicationAction} and captured * and returns the underlying request if it's wrapped or the original (cast to the expected type). @@ -231,7 +233,7 @@ public class TransportReplicationActionTests extends ESTestCase { { setStateWithBlock(clusterService, nonRetryableBlock, globalBlock); - Request request = globalBlock ? new Request() : new Request().index("index"); + Request request = globalBlock ? new Request(NO_SHARD_ID) : new Request(NO_SHARD_ID).index("index"); PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); @@ -246,7 +248,7 @@ public class TransportReplicationActionTests extends ESTestCase { { setStateWithBlock(clusterService, retryableBlock, globalBlock); - Request requestWithTimeout = (globalBlock ? new Request() : new Request().index("index")).timeout("5ms"); + Request requestWithTimeout = (globalBlock ? new Request(NO_SHARD_ID) : new Request(NO_SHARD_ID).index("index")).timeout("5ms"); PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); @@ -262,7 +264,7 @@ public class TransportReplicationActionTests extends ESTestCase { { setStateWithBlock(clusterService, retryableBlock, globalBlock); - Request request = globalBlock ? new Request() : new Request().index("index"); + Request request = globalBlock ? 
new Request(NO_SHARD_ID) : new Request(NO_SHARD_ID).index("index"); PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); @@ -281,7 +283,7 @@ public class TransportReplicationActionTests extends ESTestCase { assertIndexShardUninitialized(); } { - Request requestWithTimeout = new Request().index("unknown").setShardId(new ShardId("unknown", "_na_", 0)).timeout("5ms"); + Request requestWithTimeout = new Request(new ShardId("unknown", "_na_", 0)).index("unknown").timeout("5ms"); PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); @@ -688,8 +690,8 @@ public class TransportReplicationActionTests extends ESTestCase { } }; TestAction.PrimaryShardReference primary = action.new PrimaryShardReference(shard, releasable); - final Request request = new Request(); - Request replicaRequest = (Request) primary.perform(request).replicaRequest; + final Request request = new Request(NO_SHARD_ID); + primary.perform(request); final ElasticsearchException exception = new ElasticsearchException("testing"); primary.failShard("test", exception); @@ -716,7 +718,7 @@ public class TransportReplicationActionTests extends ESTestCase { proxy.performOn( TestShardRouting.newShardRouting(shardId, "NOT THERE", routingState == ShardRoutingState.RELOCATING ? state.nodes().iterator().next().getId() : null, false, routingState), - new Request(), + new Request(NO_SHARD_ID), randomNonNegativeLong(), randomNonNegativeLong(), listener); @@ -727,7 +729,7 @@ public class TransportReplicationActionTests extends ESTestCase { final ShardRouting replica = randomFrom(shardRoutings.replicaShards().stream() .filter(ShardRouting::assignedToNode).collect(Collectors.toList())); listener = new PlainActionFuture<>(); - proxy.performOn(replica, new Request(), randomNonNegativeLong(), randomNonNegativeLong(), listener); + proxy.performOn(replica, new Request(NO_SHARD_ID), randomNonNegativeLong(), randomNonNegativeLong(), listener); assertFalse(listener.isDone()); CapturingTransport.CapturedRequest[] captures = transport.getCapturedRequestsAndClear(); @@ -888,7 +890,7 @@ public class TransportReplicationActionTests extends ESTestCase { try { action.handleReplicaRequest( new TransportReplicationAction.ConcreteReplicaRequest<>( - new Request().setShardId(shardId), replicaRouting.allocationId().getId(), randomNonNegativeLong(), + new Request(shardId), replicaRouting.allocationId().getId(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()), createTransportChannel(new PlainActionFuture<>()), task); } catch (ElasticsearchException e) { @@ -1020,7 +1022,7 @@ public class TransportReplicationActionTests extends ESTestCase { } }; final PlainActionFuture listener = new PlainActionFuture<>(); - final Request request = new Request().setShardId(shardId); + final Request request = new Request(shardId); final long checkpoint = randomNonNegativeLong(); final long maxSeqNoOfUpdatesOrDeletes = randomNonNegativeLong(); action.handleReplicaRequest( @@ -1088,7 +1090,7 @@ public class TransportReplicationActionTests extends ESTestCase { } }; final PlainActionFuture listener = new PlainActionFuture<>(); - final Request request = new Request().setShardId(shardId); + final Request request = new Request(shardId); final long checkpoint = randomNonNegativeLong(); final long maxSeqNoOfUpdates = randomNonNegativeLong(); action.handleReplicaRequest( @@ -1166,13 +1168,12 @@ public class TransportReplicationActionTests extends ESTestCase { public AtomicInteger processedOnReplicas 
= new AtomicInteger(); public AtomicBoolean isRetrySet = new AtomicBoolean(false); - public Request() { + Request(StreamInput in) throws IOException { + super(in); } - Request(ShardId shardId) { - this(); - this.shardId = shardId; - this.index = shardId.getIndexName(); + Request(@Nullable ShardId shardId) { + super(shardId); this.waitForActiveShards = ActiveShardCount.NONE; // keep things simple } @@ -1184,7 +1185,7 @@ public class TransportReplicationActionTests extends ESTestCase { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java index b8c87acb56d..15886a517d3 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java @@ -400,7 +400,7 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe } private Request request() { - return new Request().setShardId(primary.shardId()); + return new Request(primary.shardId()); } /** @@ -558,6 +558,14 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe } static class Request extends ReplicationRequest { + Request(StreamInput in) throws IOException { + super(in); + } + + Request(ShardId shardId) { + super(shardId); + } + @Override public String toString() { return getTestClass().getName() + ".Request"; diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 3c2df0b59b2..5a35202506d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; @@ -62,6 +63,7 @@ import org.junit.Before; import org.junit.BeforeClass; import org.mockito.ArgumentCaptor; +import java.io.IOException; import java.util.Collections; import java.util.HashSet; import java.util.Locale; @@ -522,8 +524,12 @@ public class TransportWriteActionTests extends ESTestCase { } private static class TestRequest extends ReplicatedWriteRequest { + TestRequest(StreamInput in) throws IOException { + super(in); + } + TestRequest() { - setShardId(new ShardId("test", "test", 1)); + super(new ShardId("test", "test", 1)); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index e9edae6468d..9b70aefa56b 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -145,15 +145,13 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase return metaData.build(); } - protected IndexRequest copyIndexRequest(IndexRequest inRequest) throws IOException { - final IndexRequest outRequest = new IndexRequest(); + IndexRequest copyIndexRequest(IndexRequest inRequest) throws IOException { try (BytesStreamOutput out = new BytesStreamOutput()) { inRequest.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { - outRequest.readFrom(in); + return new IndexRequest(in); } } - return outRequest; } protected DiscoveryNode getDiscoveryNode(String id) { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java index cf9239af740..f05a616c956 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java @@ -16,11 +16,15 @@ import java.util.List; public final class BulkShardOperationsRequest extends ReplicatedWriteRequest { - private String historyUUID; - private List operations; - private long maxSeqNoOfUpdatesOrDeletes; + private final String historyUUID; + private final List operations; + private final long maxSeqNoOfUpdatesOrDeletes; - public BulkShardOperationsRequest() { + public BulkShardOperationsRequest(StreamInput in) throws IOException { + super(in); + historyUUID = in.readString(); + maxSeqNoOfUpdatesOrDeletes = in.readZLong(); + operations = in.readList(Translog.Operation::readOperation); } public BulkShardOperationsRequest(final ShardId shardId, @@ -47,11 +51,8 @@ public final class BulkShardOperationsRequest extends ReplicatedWriteRequest Date: Fri, 5 Apr 2019 11:55:21 -0700 Subject: [PATCH 09/45] [Docs] Change example to show col headers (#40822) Command needs `?v` so user can see the column headers. Otherwise the instructions in the note about checking the init and relo columns don't make sense --- docs/reference/upgrade/rolling_upgrade.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index e6ed7bb4846..e7a9ca09baa 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -107,7 +107,7 @@ You can check progress by submitting a <> request: [source,sh] -------------------------------------------------- -GET _cat/health +GET _cat/health?v -------------------------------------------------- // CONSOLE From f34663282c694b8dffa3a9c5899288444058f28a Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Fri, 5 Apr 2019 12:06:06 -0600 Subject: [PATCH 10/45] Update apache httpclient to version 4.5.8 (#40875) This change updates our version of httpclient to version 4.5.8, which contains the fix for HTTPCLIENT-1968, which is a bug where the client started re-writing paths that contained encoded reserved characters with their unreserved form. 
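Aside (not part of the patch): the Watcher HttpClient change further down works around this by building the URI itself from pre-decoded path segments, so that URIBuilder escapes each segment exactly once and an escaped slash (%2F) survives as-is. A minimal standalone sketch of that idea; the host, port, and path below are made-up placeholders:

[source,java]
--------------------------------------------------
import org.apache.http.client.utils.URIBuilder;

import java.net.URI;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

public class PathEscapingSketch {
    public static void main(String[] args) throws Exception {
        // hypothetical, already-escaped path; the %2F must not be rewritten to '/'
        String escapedPath = "idx/_doc/some%2Fid";
        List<String> segments = new ArrayList<>();
        for (String part : escapedPath.split("/")) {
            // undo our own escaping so URIBuilder does not escape the segment a second time
            segments.add(URLDecoder.decode(part, StandardCharsets.UTF_8.name()));
        }
        URI uri = new URIBuilder()
                .setScheme("http")
                .setHost("localhost")
                .setPort(9200)
                .setPathSegments(segments) // URIBuilder escapes each segment exactly once
                .build();
        System.out.println(uri); // prints http://localhost:9200/idx/_doc/some%2Fid
    }
}
--------------------------------------------------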
--- buildSrc/version.properties | 2 +- .../rest/licenses/httpclient-4.5.7.jar.sha1 | 1 - .../rest/licenses/httpclient-4.5.8.jar.sha1 | 1 + .../licenses/httpclient-4.5.7.jar.sha1 | 1 - .../licenses/httpclient-4.5.8.jar.sha1 | 1 + .../licenses/httpclient-4.5.7.jar.sha1 | 1 - .../licenses/httpclient-4.5.8.jar.sha1 | 1 + .../licenses/httpclient-4.5.7.jar.sha1 | 1 - .../licenses/httpclient-4.5.8.jar.sha1 | 1 + .../licenses/httpclient-4.5.7.jar.sha1 | 1 - .../licenses/httpclient-4.5.8.jar.sha1 | 1 + .../licenses/httpclient-4.5.7.jar.sha1 | 1 - .../licenses/httpclient-4.5.8.jar.sha1 | 1 + .../licenses/httpclient-4.5.7.jar.sha1 | 1 - .../licenses/httpclient-4.5.8.jar.sha1 | 1 + .../core/licenses/httpclient-4.5.7.jar.sha1 | 1 - .../core/licenses/httpclient-4.5.8.jar.sha1 | 1 + .../licenses/httpclient-cache-4.5.7.jar.sha1 | 1 - .../licenses/httpclient-cache-4.5.8.jar.sha1 | 1 + .../xpack/watcher/common/http/HttpClient.java | 46 ++++++++++--------- 20 files changed, 34 insertions(+), 32 deletions(-) delete mode 100644 client/rest/licenses/httpclient-4.5.7.jar.sha1 create mode 100644 client/rest/licenses/httpclient-4.5.8.jar.sha1 delete mode 100644 client/sniffer/licenses/httpclient-4.5.7.jar.sha1 create mode 100644 client/sniffer/licenses/httpclient-4.5.8.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/httpclient-4.5.7.jar.sha1 create mode 100644 plugins/discovery-azure-classic/licenses/httpclient-4.5.8.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/httpclient-4.5.7.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/httpclient-4.5.8.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/httpclient-4.5.7.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/httpclient-4.5.8.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/httpclient-4.5.7.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/httpclient-4.5.8.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/httpclient-4.5.7.jar.sha1 create mode 100644 plugins/repository-s3/licenses/httpclient-4.5.8.jar.sha1 delete mode 100644 x-pack/plugin/core/licenses/httpclient-4.5.7.jar.sha1 create mode 100644 x-pack/plugin/core/licenses/httpclient-4.5.8.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/httpclient-cache-4.5.7.jar.sha1 create mode 100644 x-pack/plugin/security/licenses/httpclient-cache-4.5.8.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index f026e1603d6..59db828eb0a 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -32,7 +32,7 @@ bouncycastle = 1.61 # test dependencies randomizedrunner = 2.7.1 junit = 4.12 -httpclient = 4.5.7 +httpclient = 4.5.8 httpcore = 4.4.11 httpasyncclient = 4.1.4 commonslogging = 1.1.3 diff --git a/client/rest/licenses/httpclient-4.5.7.jar.sha1 b/client/rest/licenses/httpclient-4.5.7.jar.sha1 deleted file mode 100644 index a8b7cc0d994..00000000000 --- a/client/rest/licenses/httpclient-4.5.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dda059f4908e1b548b7ba68d81a3b05897f27cb0 \ No newline at end of file diff --git a/client/rest/licenses/httpclient-4.5.8.jar.sha1 b/client/rest/licenses/httpclient-4.5.8.jar.sha1 new file mode 100644 index 00000000000..73f0d30c709 --- /dev/null +++ b/client/rest/licenses/httpclient-4.5.8.jar.sha1 @@ -0,0 +1 @@ +c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient-4.5.7.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.7.jar.sha1 deleted file mode 100644 index a8b7cc0d994..00000000000 --- 
a/client/sniffer/licenses/httpclient-4.5.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dda059f4908e1b548b7ba68d81a3b05897f27cb0 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient-4.5.8.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.8.jar.sha1 new file mode 100644 index 00000000000..73f0d30c709 --- /dev/null +++ b/client/sniffer/licenses/httpclient-4.5.8.jar.sha1 @@ -0,0 +1 @@ +c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpclient-4.5.7.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpclient-4.5.7.jar.sha1 deleted file mode 100644 index a8b7cc0d994..00000000000 --- a/plugins/discovery-azure-classic/licenses/httpclient-4.5.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dda059f4908e1b548b7ba68d81a3b05897f27cb0 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpclient-4.5.8.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpclient-4.5.8.jar.sha1 new file mode 100644 index 00000000000..73f0d30c709 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/httpclient-4.5.8.jar.sha1 @@ -0,0 +1 @@ +c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpclient-4.5.7.jar.sha1 b/plugins/discovery-ec2/licenses/httpclient-4.5.7.jar.sha1 deleted file mode 100644 index a8b7cc0d994..00000000000 --- a/plugins/discovery-ec2/licenses/httpclient-4.5.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dda059f4908e1b548b7ba68d81a3b05897f27cb0 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpclient-4.5.8.jar.sha1 b/plugins/discovery-ec2/licenses/httpclient-4.5.8.jar.sha1 new file mode 100644 index 00000000000..73f0d30c709 --- /dev/null +++ b/plugins/discovery-ec2/licenses/httpclient-4.5.8.jar.sha1 @@ -0,0 +1 @@ +c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpclient-4.5.7.jar.sha1 b/plugins/discovery-gce/licenses/httpclient-4.5.7.jar.sha1 deleted file mode 100644 index a8b7cc0d994..00000000000 --- a/plugins/discovery-gce/licenses/httpclient-4.5.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dda059f4908e1b548b7ba68d81a3b05897f27cb0 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpclient-4.5.8.jar.sha1 b/plugins/discovery-gce/licenses/httpclient-4.5.8.jar.sha1 new file mode 100644 index 00000000000..73f0d30c709 --- /dev/null +++ b/plugins/discovery-gce/licenses/httpclient-4.5.8.jar.sha1 @@ -0,0 +1 @@ +c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/httpclient-4.5.7.jar.sha1 b/plugins/repository-gcs/licenses/httpclient-4.5.7.jar.sha1 deleted file mode 100644 index a8b7cc0d994..00000000000 --- a/plugins/repository-gcs/licenses/httpclient-4.5.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dda059f4908e1b548b7ba68d81a3b05897f27cb0 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/httpclient-4.5.8.jar.sha1 b/plugins/repository-gcs/licenses/httpclient-4.5.8.jar.sha1 new file mode 100644 index 00000000000..73f0d30c709 --- /dev/null +++ b/plugins/repository-gcs/licenses/httpclient-4.5.8.jar.sha1 @@ -0,0 +1 @@ +c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpclient-4.5.7.jar.sha1 b/plugins/repository-s3/licenses/httpclient-4.5.7.jar.sha1 deleted file mode 100644 index a8b7cc0d994..00000000000 --- a/plugins/repository-s3/licenses/httpclient-4.5.7.jar.sha1 +++ /dev/null @@ 
-1 +0,0 @@ -dda059f4908e1b548b7ba68d81a3b05897f27cb0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpclient-4.5.8.jar.sha1 b/plugins/repository-s3/licenses/httpclient-4.5.8.jar.sha1 new file mode 100644 index 00000000000..73f0d30c709 --- /dev/null +++ b/plugins/repository-s3/licenses/httpclient-4.5.8.jar.sha1 @@ -0,0 +1 @@ +c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpclient-4.5.7.jar.sha1 b/x-pack/plugin/core/licenses/httpclient-4.5.7.jar.sha1 deleted file mode 100644 index a8b7cc0d994..00000000000 --- a/x-pack/plugin/core/licenses/httpclient-4.5.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dda059f4908e1b548b7ba68d81a3b05897f27cb0 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpclient-4.5.8.jar.sha1 b/x-pack/plugin/core/licenses/httpclient-4.5.8.jar.sha1 new file mode 100644 index 00000000000..73f0d30c709 --- /dev/null +++ b/x-pack/plugin/core/licenses/httpclient-4.5.8.jar.sha1 @@ -0,0 +1 @@ +c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/httpclient-cache-4.5.7.jar.sha1 b/x-pack/plugin/security/licenses/httpclient-cache-4.5.7.jar.sha1 deleted file mode 100644 index b121bd65421..00000000000 --- a/x-pack/plugin/security/licenses/httpclient-cache-4.5.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c13a0ce27c17831e5e5be6c751842006dcecb270 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/httpclient-cache-4.5.8.jar.sha1 b/x-pack/plugin/security/licenses/httpclient-cache-4.5.8.jar.sha1 new file mode 100644 index 00000000000..87db7aba09e --- /dev/null +++ b/x-pack/plugin/security/licenses/httpclient-cache-4.5.8.jar.sha1 @@ -0,0 +1 @@ +bb984b73da2153285b660f3e278498abd94ccbb5 \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java index e0d3129e75f..39340778d33 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java @@ -23,8 +23,8 @@ import org.apache.http.client.methods.HttpHead; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.methods.HttpRequestWrapper; import org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.client.utils.URIBuilder; import org.apache.http.client.utils.URIUtils; -import org.apache.http.client.utils.URLEncodedUtils; import org.apache.http.conn.ssl.DefaultHostnameVerifier; import org.apache.http.conn.ssl.NoopHostnameVerifier; import org.apache.http.conn.ssl.SSLConnectionSocketFactory; @@ -65,10 +65,13 @@ import java.io.ByteArrayOutputStream; import java.io.Closeable; import java.io.IOException; import java.io.InputStream; +import java.io.UnsupportedEncodingException; import java.net.URI; import java.net.URISyntaxException; +import java.net.URLDecoder; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -314,33 +317,32 @@ public class HttpClient implements Closeable { } private Tuple createURI(HttpRequest request) { - // this could be really simple, as the apache http client has a UriBuilder class, however this class is always doing - // url path escaping, and we have done 
this already, so this would result in double escaping try { List qparams = new ArrayList<>(request.params.size()); request.params.forEach((k, v) -> qparams.add(new BasicNameValuePair(k, v))); - String format = URLEncodedUtils.format(qparams, "UTF-8"); - URI uri = URIUtils.createURI(request.scheme.scheme(), request.host, request.port, request.path, - Strings.isNullOrEmpty(format) ? null : format, null); - - if (uri.isAbsolute() == false) { - throw new IllegalStateException("URI [" + uri.toASCIIString() + "] must be absolute"); - } - final HttpHost httpHost = URIUtils.extractHost(uri); - // what a mess that we need to do this to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1968 - // in some cases the HttpClient will re-write the URI which drops the escaping for - // slashes within a path. This rewriting is done to obtain a relative URI when - // a proxy is not being used. To avoid this we can handle making it relative ourselves - if (request.path != null && request.path.contains("%2F")) { - final boolean isUsingProxy = (request.proxy != null && request.proxy.equals(HttpProxy.NO_PROXY) == false) || - HttpProxy.NO_PROXY.equals(settingsProxy) == false; - if (isUsingProxy == false) { - // we need a relative uri - uri = URIUtils.createURI(null, null, -1, request.path, Strings.isNullOrEmpty(format) ? null : format, null); + // this could be really simple, as the apache http client has a UriBuilder class, however this class is always doing + // url path escaping, and we have done this already, so this would result in double escaping + final List unescapedPathParts; + if (Strings.isEmpty(request.path)) { + unescapedPathParts = Collections.emptyList(); + } else { + final String[] pathParts = request.path.split("/"); + unescapedPathParts = new ArrayList<>(pathParts.length); + for (String part : pathParts) { + unescapedPathParts.add(URLDecoder.decode(part, StandardCharsets.UTF_8.name())); } } + + final URI uri = new URIBuilder() + .setScheme(request.scheme().scheme()) + .setHost(request.host) + .setPort(request.port) + .setPathSegments(unescapedPathParts) + .setParameters(qparams) + .build(); + final HttpHost httpHost = URIUtils.extractHost(uri); return new Tuple<>(httpHost, uri); - } catch (URISyntaxException e) { + } catch (URISyntaxException | UnsupportedEncodingException e) { throw new IllegalArgumentException(e); } } From 2fd1689341aae3a1e7169d90f6e25dc1631933bb Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 5 Apr 2019 16:39:44 -0400 Subject: [PATCH 11/45] Allow AVX-512 on JDK 11+ (#40828) We previously found a bug in the JVM where AVX-512 instructions could crash the JVM to crash with a segmentation fault. This bug impacted JDK 9 and JDK 10, but was most prominent on JDK 10 because AVX-512 was enabled there by default. In JDK 11, this bug is reported fixed so this commit restricts the disabling of AVX-512 to JDK 10 only. Since we no longer support JDK 10 for any versions that this commit will be integrated into (7.1, 8.0), we simply remove the disabling of this flag from the JVM options. 
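Aside (not part of the patch): jvm.options supports version-conditional entries, which is what made the original workaround apply only from JDK 10 onwards. As a sketch of the prefix syntax rather than a recommended setting: a `10-:` prefix means "JDK 10 and later", while a bare `10:` prefix would have limited the workaround to JDK 10 only — the alternative the message alludes to before opting to remove the lines entirely.

[source,txt]
--------------------------------------------------
# removed by this commit: applies on JDK 10 and later
10-:-XX:UseAVX=2

# hypothetical JDK-10-only variant, shown only to illustrate the prefix syntax
10:-XX:UseAVX=2
--------------------------------------------------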
--- distribution/src/config/jvm.options | 3 --- 1 file changed, 3 deletions(-) diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 2b30d6a87b4..58fe4721723 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -117,6 +117,3 @@ ${error.file} # due to internationalization enhancements in JDK 9 Elasticsearch need to set the provider to COMPAT otherwise # time/date parsing will break in an incompatible way for some date patterns and locals 9-:-Djava.locale.providers=COMPAT - -# temporary workaround for C2 bug with JDK 10 on hardware with AVX-512 -10-:-XX:UseAVX=2 From f4348843ba79f946ffb471b7762c6fe6baa6aa56 Mon Sep 17 00:00:00 2001 From: lcawl Date: Fri, 5 Apr 2019 14:26:22 -0700 Subject: [PATCH 12/45] [DOCS] Adds placeholder for 7.0.0 release notes --- docs/reference/release-notes.asciidoc | 2 ++ docs/reference/release-notes/7.0.0.asciidoc | 10 ++++++++++ 2 files changed, 12 insertions(+) create mode 100644 docs/reference/release-notes/7.0.0.asciidoc diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index a9757e82360..f3d62e889dc 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,6 +6,7 @@ This section summarizes the changes in each release. +* <> * <> * <> * <> @@ -14,6 +15,7 @@ This section summarizes the changes in each release. -- +include::release-notes/7.0.0.asciidoc[] include::release-notes/7.0.0-rc2.asciidoc[] include::release-notes/7.0.0-rc1.asciidoc[] include::release-notes/7.0.0-beta1.asciidoc[] diff --git a/docs/reference/release-notes/7.0.0.asciidoc b/docs/reference/release-notes/7.0.0.asciidoc new file mode 100644 index 00000000000..fffd0f7467c --- /dev/null +++ b/docs/reference/release-notes/7.0.0.asciidoc @@ -0,0 +1,10 @@ +[[release-notes-7.0.0]] +== {es} version 7.0.0 + +coming[7.0.0] + +//These release notes include all changes made in the alpha, beta, and RC +//releases of 7.0.0. + +//Also see <>. 
+ From aea4e6596f9be4b8849dd7b6f16ff99157110997 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Sat, 6 Apr 2019 00:42:14 +0300 Subject: [PATCH 13/45] Source additional files correctly in elasticsearch-cli (#40890) Since we only source additional sources from the same dir as our cli scripts, resolve the path relevant to $ES_HOME --- distribution/src/bin/elasticsearch-cli | 2 +- .../packaging/test/ArchiveTestCase.java | 29 +++++++++++++++++++ .../packaging/util/Installation.java | 3 ++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/distribution/src/bin/elasticsearch-cli b/distribution/src/bin/elasticsearch-cli index 5699b3feb58..ae0c88b2043 100644 --- a/distribution/src/bin/elasticsearch-cli +++ b/distribution/src/bin/elasticsearch-cli @@ -7,7 +7,7 @@ source "`dirname "$0"`"/elasticsearch-env IFS=';' read -r -a additional_sources <<< "$ES_ADDITIONAL_SOURCES" for additional_source in "${additional_sources[@]}" do - source "`dirname "$0"`"/$additional_source + source "$ES_HOME"/bin/$additional_source done IFS=';' read -r -a additional_classpath_directories <<< "$ES_ADDITIONAL_CLASSPATH_DIRECTORIES" diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java index 531ae1e3c4a..d427017d0b0 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java @@ -439,4 +439,33 @@ public abstract class ArchiveTestCase extends PackagingTestCase { assertThat(result.stdout, containsString("Master node was successfully bootstrapped")); } + public void test94ElasticsearchNodeExecuteCliNotEsHomeWorkDir() throws Exception { + assumeThat(installation, is(notNullValue())); + + final Installation.Executables bin = installation.executables(); + final Shell sh = newShell(); + // Run the cli tools from the tmp dir + sh.setWorkingDirectory(getTempDir()); + + Platforms.PlatformAction action = () -> { + Result result = sh.run(bin.elasticsearchCertutil+ " -h"); + assertThat(result.stdout, + containsString("Simplifies certificate creation for use with the Elastic Stack")); + result = sh.run(bin.elasticsearchSyskeygen+ " -h"); + assertThat(result.stdout, + containsString("system key tool")); + result = sh.run(bin.elasticsearchSetupPasswords+ " -h"); + assertThat(result.stdout, + containsString("Sets the passwords for reserved users")); + result = sh.run(bin.elasticsearchUsers+ " -h"); + assertThat(result.stdout, + containsString("Manages elasticsearch file users")); + }; + + if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { + Platforms.onLinux(action); + Platforms.onWindows(action); + } + } + } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java index 0e29baaa2c8..9e3ba5b52e2 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java @@ -104,6 +104,9 @@ public class Installation { public final Path elasticsearchCertutil = platformExecutable("elasticsearch-certutil"); public final Path elasticsearchShard = platformExecutable("elasticsearch-shard"); public final Path elasticsearchNode = platformExecutable("elasticsearch-node"); + public final Path elasticsearchSetupPasswords = 
platformExecutable("elasticsearch-setup-passwords"); + public final Path elasticsearchSyskeygen = platformExecutable("elasticsearch-syskeygen"); + public final Path elasticsearchUsers = platformExecutable("elasticsearch-users"); private Path platformExecutable(String name) { final String platformExecutableName = Platforms.WINDOWS From ac58b9bdedaaf8bf2b86dc4240bab75b4c2d9be2 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 5 Apr 2019 17:45:30 -0400 Subject: [PATCH 14/45] Fix date index name processor default date_formats (#40915) This commit is a correction of a doc bug in the docs for the ingest date-index-name processor. The correct pattern is yyyy-MM-dd'T'HH:mm:ss.SSSXX. This is due to the transition from Joda time to Java time where Z does not mean the same thing between the two. --- docs/reference/ingest/processors/date-index-name.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/ingest/processors/date-index-name.asciidoc b/docs/reference/ingest/processors/date-index-name.asciidoc index e2f28425758..7f42bf09040 100644 --- a/docs/reference/ingest/processors/date-index-name.asciidoc +++ b/docs/reference/ingest/processors/date-index-name.asciidoc @@ -137,7 +137,7 @@ understands this to mean `2016-04-01` as is explained in the <>. | `date_rounding` | yes | - | How to round the date when formatting the date into the index name. Valid values are: `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). Supports <>. -| `date_formats` | no | yyyy-MM-dd'T'HH:mm:ss.SSSZ | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. +| `date_formats` | no | yyyy-MM-dd'T'HH:mm:ss.SSSXX | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. | `timezone` | no | UTC | The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names. | `locale` | no | ENGLISH | The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days. | `index_name_format` | no | yyyy-MM-dd | The format to be used when printing the parsed date into the index name. An valid java time pattern is expected here. Supports <>. From fd51780de28205b3a77d29dcd26f233e3de9ba2b Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Fri, 5 Apr 2019 15:15:14 -0700 Subject: [PATCH 15/45] Move test classes to test root in Painless (#40873) This moves several test classes that were part of the main root to the test root. These were part of the main root due to limitations prior to whitelist customization. Without whitelist customization these can be moved to a test context and removed from the base whitelists as they should not be user facing. 
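Aside (not part of the patch): the mechanism that allows this move is per-context whitelist customization, shown concretely in the ScriptTestCase change below. A condensed sketch of that scriptContexts() override (generic types reconstructed for readability; the resource name is the new test-only whitelist file added by this patch):

[source,java]
--------------------------------------------------
import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.painless.spi.WhitelistLoader;
import org.elasticsearch.script.ScriptContext;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch of ScriptTestCase#scriptContexts() after this patch: start from the user-facing
// base whitelists and add a test-only whitelist so classes such as FeatureTestObject are
// visible to test scripts without ever appearing in the production whitelist files.
protected Map<ScriptContext<?>, List<Whitelist>> scriptContexts() {
    Map<ScriptContext<?>, List<Whitelist>> contexts = new HashMap<>();
    List<Whitelist> whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS);
    whitelists.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class,
            "org.elasticsearch.painless.test"));
    contexts.put(PainlessTestScript.CONTEXT, whitelists);
    return contexts;
}
--------------------------------------------------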
--- .../painless/spi/org.elasticsearch.txt | 27 ------------------- .../painless/AugmentationTests.java | 8 +++--- .../elasticsearch/painless/BasicAPITests.java | 2 +- .../painless/BasicExpressionTests.java | 12 ++++----- .../FeatureTestAugmentationObject.java} | 8 +++--- .../painless/FeatureTestObject.java} | 6 ++--- .../painless/FunctionRefTests.java | 12 ++++----- .../painless/GeneralCastTests.java | 6 ++--- .../elasticsearch/painless/LambdaTests.java | 2 +- .../elasticsearch/painless/OverloadTests.java | 8 +++--- .../painless/PainlessDocGenerator.java | 2 +- .../painless/ScriptTestCase.java | 17 ++++++------ .../painless/StaticTestObject.java} | 2 +- .../painless/node/NodeToStringTests.java | 24 ++++++++++++----- .../spi/org.elasticsearch.painless.test | 22 +++++++++++++++ 15 files changed, 82 insertions(+), 76 deletions(-) rename modules/lang-painless/src/{main/java/org/elasticsearch/painless/FeatureTestAugmentation.java => test/java/org/elasticsearch/painless/FeatureTestAugmentationObject.java} (81%) rename modules/lang-painless/src/{main/java/org/elasticsearch/painless/FeatureTest.java => test/java/org/elasticsearch/painless/FeatureTestObject.java} (95%) rename modules/lang-painless/src/{main/java/org/elasticsearch/painless/StaticTest.java => test/java/org/elasticsearch/painless/StaticTestObject.java} (96%) diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt index b9e0e6525f6..412afc62dcf 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt @@ -240,30 +240,3 @@ class org.elasticsearch.index.query.IntervalFilterScript$Interval { int getEnd() int getGaps() } - -# for testing -class org.elasticsearch.painless.FeatureTest no_import { - int z - () - (int,int) - int getX() - int getY() - Integer getI() - void setX(int) - void setY(int) - void setI(Integer) - boolean overloadedStatic() - boolean overloadedStatic(boolean) - int staticNumberTest(Number) - Double mixedAdd(int, Byte, char, Float) - Object twoFunctionsOfX(Function,Function) - void listInput(List) - int org.elasticsearch.painless.FeatureTestAugmentation getTotal() - int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int) -} - -# for testing -static_import { - int staticAddIntsTest(int, int) from_class org.elasticsearch.painless.StaticTest - float staticAddFloatsTest(float, float) from_class org.elasticsearch.painless.FeatureTest -} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java index 8618194028b..a0d1c5a5891 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java @@ -190,13 +190,13 @@ public class AugmentationTests extends ScriptTestCase { } public void testFeatureTest() { - assertEquals(5, exec("org.elasticsearch.painless.FeatureTest ft = new org.elasticsearch.painless.FeatureTest();" + + assertEquals(5, exec("org.elasticsearch.painless.FeatureTestObject ft = new org.elasticsearch.painless.FeatureTestObject();" + " ft.setX(3); ft.setY(2); return ft.getTotal()")); - assertEquals(5, exec("def ft = new org.elasticsearch.painless.FeatureTest();" + 
+ assertEquals(5, exec("def ft = new org.elasticsearch.painless.FeatureTestObject();" + " ft.setX(3); ft.setY(2); return ft.getTotal()")); - assertEquals(8, exec("org.elasticsearch.painless.FeatureTest ft = new org.elasticsearch.painless.FeatureTest();" + + assertEquals(8, exec("org.elasticsearch.painless.FeatureTestObject ft = new org.elasticsearch.painless.FeatureTestObject();" + " ft.setX(3); ft.setY(2); return ft.addToTotal(3)")); - assertEquals(8, exec("def ft = new org.elasticsearch.painless.FeatureTest();" + + assertEquals(8, exec("def ft = new org.elasticsearch.painless.FeatureTestObject();" + " ft.setX(3); ft.setY(2); return ft.addToTotal(3)")); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java index 6a775825117..371c3a5a3e5 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java @@ -127,7 +127,7 @@ public class BasicAPITests extends ScriptTestCase { } public void testPublicMemberAccess() { - assertEquals(5, exec("org.elasticsearch.painless.FeatureTest ft = new org.elasticsearch.painless.FeatureTest();" + + assertEquals(5, exec("org.elasticsearch.painless.FeatureTestObject ft = new org.elasticsearch.painless.FeatureTestObject();" + "ft.z = 5; return ft.z;")); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java index b729c6769c5..4269a93e4dc 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java @@ -191,11 +191,11 @@ public class BasicExpressionTests extends ScriptTestCase { assertNull( exec("def a = null; return a?.length()")); assertEquals(3, exec("def a = 'foo'; return a?.length()")); // Read shortcut - assertMustBeNullable( "org.elasticsearch.painless.FeatureTest a = null; return a?.x"); + assertMustBeNullable( "org.elasticsearch.painless.FeatureTestObject a = null; return a?.x"); assertMustBeNullable( - "org.elasticsearch.painless.FeatureTest a = new org.elasticsearch.painless.FeatureTest(); return a?.x"); + "org.elasticsearch.painless.FeatureTestObject a = new org.elasticsearch.painless.FeatureTestObject(); return a?.x"); assertNull( exec("def a = null; return a?.x")); - assertEquals(0, exec("def a = new org.elasticsearch.painless.FeatureTest(); return a?.x")); + assertEquals(0, exec("def a = new org.elasticsearch.painless.FeatureTestObject(); return a?.x")); // Maps // Call @@ -222,7 +222,7 @@ public class BasicExpressionTests extends ScriptTestCase { assertEquals(2, exec("def a = new int[] {2, 3}; return a?.length")); // Results from maps (should just work but let's test anyway) - FeatureTest t = new FeatureTest(); + FeatureTestObject t = new FeatureTestObject(); assertNull( exec("Map a = ['thing': params.t]; return a.other?.getX()", singletonMap("t", t), true)); assertNull( exec("Map a = ['thing': params.t]; return a.other?.x", singletonMap("t", t), true)); assertNull( exec("def a = ['thing': params.t]; return a.other?.getX()", singletonMap("t", t), true)); @@ -254,8 +254,8 @@ public class BasicExpressionTests extends ScriptTestCase { + "return a.missing_length", true)); // Writes, all unsupported at this point -// assertEquals(null, 
exec("org.elasticsearch.painless.FeatureTest a = null; return a?.x")); // Read field -// assertEquals(null, exec("org.elasticsearch.painless.FeatureTest a = null; a?.x = 7; return a?.x")); // Write field +// assertEquals(null, exec("org.elasticsearch.painless.FeatureTestObject a = null; return a?.x")); // Read field +// assertEquals(null, exec("org.elasticsearch.painless.FeatureTestObject a = null; a?.x = 7; return a?.x")); // Write field // assertEquals(null, exec("Map a = null; a?.other = 'wow'; return a?.other")); // Write shortcut // assertEquals(null, exec("def a = null; a?.other = 'cat'; return a?.other")); // Write shortcut // assertEquals(null, exec("Map a = ['thing': 'bar']; a.other?.cat = 'no'; return a.other?.cat")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTestAugmentation.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FeatureTestAugmentationObject.java similarity index 81% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTestAugmentation.java rename to modules/lang-painless/src/test/java/org/elasticsearch/painless/FeatureTestAugmentationObject.java index c1ea19defb9..ca9fef97df2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTestAugmentation.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FeatureTestAugmentationObject.java @@ -19,14 +19,14 @@ package org.elasticsearch.painless; -public class FeatureTestAugmentation { - public static int getTotal(FeatureTest ft) { +public class FeatureTestAugmentationObject { + public static int getTotal(FeatureTestObject ft) { return ft.getX() + ft.getY(); } - public static int addToTotal(FeatureTest ft, int add) { + public static int addToTotal(FeatureTestObject ft, int add) { return getTotal(ft) + add; } - private FeatureTestAugmentation() {} + private FeatureTestAugmentationObject() {} } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTest.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FeatureTestObject.java similarity index 95% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTest.java rename to modules/lang-painless/src/test/java/org/elasticsearch/painless/FeatureTestObject.java index 8806a388745..59a1a62d7b8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTest.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FeatureTestObject.java @@ -23,7 +23,7 @@ import java.util.function.Function; */ /** Currently just a dummy class for testing a few features not yet exposed by whitelist! 
*/ -public class FeatureTest { +public class FeatureTestObject { /** static method that returns true */ public static boolean overloadedStatic() { return true; @@ -51,11 +51,11 @@ public class FeatureTest { private Integer i; /** empty ctor */ - public FeatureTest() { + public FeatureTestObject() { } /** ctor with params */ - public FeatureTest(int x, int y) { + public FeatureTestObject(int x, int y) { this.x = x; this.y = y; } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java index 96360a62868..2aa6be428ee 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java @@ -46,12 +46,12 @@ public class FunctionRefTests extends ScriptTestCase { public void testQualifiedStaticMethodReference() { assertEquals(true, - exec("List l = [true]; l.stream().map(org.elasticsearch.painless.FeatureTest::overloadedStatic).findFirst().get()")); + exec("List l = [true]; l.stream().map(org.elasticsearch.painless.FeatureTestObject::overloadedStatic).findFirst().get()")); } public void testQualifiedStaticMethodReferenceDef() { assertEquals(true, - exec("def l = [true]; l.stream().map(org.elasticsearch.painless.FeatureTest::overloadedStatic).findFirst().get()")); + exec("def l = [true]; l.stream().map(org.elasticsearch.painless.FeatureTestObject::overloadedStatic).findFirst().get()")); } public void testQualifiedVirtualMethodReference() { @@ -133,7 +133,7 @@ public class FunctionRefTests extends ScriptTestCase { assertEquals("testingcdefg", exec( "String x = 'testing';" + "String y = 'abcdefg';" + - "org.elasticsearch.painless.FeatureTest test = new org.elasticsearch.painless.FeatureTest(2,3);" + + "org.elasticsearch.painless.FeatureTestObject test = new org.elasticsearch.painless.FeatureTestObject(2,3);" + "return test.twoFunctionsOfX(x::concat, y::substring);")); } @@ -141,7 +141,7 @@ public class FunctionRefTests extends ScriptTestCase { assertEquals("testingcdefg", exec( "def x = 'testing';" + "def y = 'abcdefg';" + - "org.elasticsearch.painless.FeatureTest test = new org.elasticsearch.painless.FeatureTest(2,3);" + + "org.elasticsearch.painless.FeatureTestObject test = new org.elasticsearch.painless.FeatureTestObject(2,3);" + "return test.twoFunctionsOfX(x::concat, y::substring);")); } @@ -149,7 +149,7 @@ public class FunctionRefTests extends ScriptTestCase { assertEquals("testingcdefg", exec( "String x = 'testing';" + "String y = 'abcdefg';" + - "def test = new org.elasticsearch.painless.FeatureTest(2,3);" + + "def test = new org.elasticsearch.painless.FeatureTestObject(2,3);" + "return test.twoFunctionsOfX(x::concat, y::substring);")); } @@ -157,7 +157,7 @@ public class FunctionRefTests extends ScriptTestCase { assertEquals("testingcdefg", exec( "def x = 'testing';" + "def y = 'abcdefg';" + - "def test = new org.elasticsearch.painless.FeatureTest(2,3);" + + "def test = new org.elasticsearch.painless.FeatureTestObject(2,3);" + "return test.twoFunctionsOfX(x::concat, y::substring);")); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java index 353146211f3..f1bb849b20c 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java +++ 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java @@ -333,15 +333,15 @@ public class GeneralCastTests extends ScriptTestCase { assertEquals(1, exec("def y = 2.0; y.compareTo(1);")); assertEquals(1, exec("int x = 1; def y = 2.0; y.compareTo(x);")); assertEquals(-1, exec("Integer x = Integer.valueOf(3); def y = 2.0; y.compareTo(x);")); - assertEquals(2, exec("def f = new org.elasticsearch.painless.FeatureTest(); f.i = (byte)2; f.i")); + assertEquals(2, exec("def f = new org.elasticsearch.painless.FeatureTestObject(); f.i = (byte)2; f.i")); assertEquals(4.0, exec( - "def x = new org.elasticsearch.painless.FeatureTest(); " + + "def x = new org.elasticsearch.painless.FeatureTestObject(); " + "Byte i = Byte.valueOf(3); " + "byte j = 1;" + "Short s = Short.valueOf(-2);" + "x.mixedAdd(j, i, (char)2, s)" )); - assertNull(exec("def f = new org.elasticsearch.painless.FeatureTest(); f.i = null; f.i")); + assertNull(exec("def f = new org.elasticsearch.painless.FeatureTestObject(); f.i = null; f.i")); expectScriptThrows(ClassCastException.class, () -> exec("def x = 2.0; def y = 1; y.compareTo(x);")); expectScriptThrows(ClassCastException.class, () -> exec("float f = 1.0f; def y = 1; y.compareTo(f);")); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java index 184e97c4a47..e55edc123c8 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java @@ -112,7 +112,7 @@ public class LambdaTests extends ScriptTestCase { public void testTwoLambdas() { assertEquals("testingcdefg", exec( - "org.elasticsearch.painless.FeatureTest test = new org.elasticsearch.painless.FeatureTest(2,3);" + + "org.elasticsearch.painless.FeatureTestObject test = new org.elasticsearch.painless.FeatureTestObject(2,3);" + "return test.twoFunctionsOfX(x -> 'testing'.concat(x), y -> 'abcdefg'.substring(y))")); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/OverloadTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/OverloadTests.java index 52c28799fae..24abc840868 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/OverloadTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/OverloadTests.java @@ -41,14 +41,14 @@ public class OverloadTests extends ScriptTestCase { } public void testConstructor() { - assertEquals(true, exec("org.elasticsearch.painless.FeatureTest f = new org.elasticsearch.painless.FeatureTest();" + + assertEquals(true, exec("org.elasticsearch.painless.FeatureTestObject f = new org.elasticsearch.painless.FeatureTestObject();" + "return f.x == 0 && f.y == 0;")); - assertEquals(true, exec("org.elasticsearch.painless.FeatureTest f = new org.elasticsearch.painless.FeatureTest(1, 2);" + + assertEquals(true, exec("org.elasticsearch.painless.FeatureTestObject f = new org.elasticsearch.painless.FeatureTestObject(1, 2);" + "return f.x == 1 && f.y == 2;")); } public void testStatic() { - assertEquals(true, exec("return org.elasticsearch.painless.FeatureTest.overloadedStatic();")); - assertEquals(false, exec("return org.elasticsearch.painless.FeatureTest.overloadedStatic(false);")); + assertEquals(true, exec("return org.elasticsearch.painless.FeatureTestObject.overloadedStatic();")); + assertEquals(false, exec("return 
org.elasticsearch.painless.FeatureTestObject.overloadedStatic(false);")); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java index ac76a8c0408..c1ba6bfbe1c 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java @@ -19,8 +19,8 @@ package org.elasticsearch.painless; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.painless.lookup.PainlessClass; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 37b3c3d8cb8..0f04fa92194 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -22,14 +22,14 @@ package org.elasticsearch.painless; import junit.framework.AssertionFailedError; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.painless.antlr.Walker; -import org.elasticsearch.painless.lookup.PainlessLookup; -import org.elasticsearch.painless.lookup.PainlessLookupBuilder; import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistLoader; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptException; import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -45,8 +45,6 @@ import static org.hamcrest.Matchers.hasSize; * Typically just asserts the output of {@code exec()} */ public abstract class ScriptTestCase extends ESTestCase { - private static final PainlessLookup PAINLESS_LOOKUP = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); - protected PainlessScriptEngine scriptEngine; @Before @@ -66,7 +64,9 @@ public abstract class ScriptTestCase extends ESTestCase { */ protected Map, List> scriptContexts() { Map, List> contexts = new HashMap<>(); - contexts.put(PainlessTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + List whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); + whitelists.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.painless.test")); + contexts.put(PainlessTestScript.CONTEXT, whitelists); return contexts; } @@ -91,12 +91,13 @@ public abstract class ScriptTestCase extends ESTestCase { public Object exec(String script, Map vars, Map compileParams, boolean picky) { // test for ambiguity errors before running the actual script if picky is true if (picky) { - ScriptClassInfo scriptClassInfo = new ScriptClassInfo(PAINLESS_LOOKUP, PainlessTestScript.class); + ScriptClassInfo scriptClassInfo = + new ScriptClassInfo(scriptEngine.getContextsToLookups().get(PainlessTestScript.CONTEXT), PainlessTestScript.class); CompilerSettings pickySettings = new CompilerSettings(); pickySettings.setPicky(true); pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings())); - Walker.buildPainlessTree( - scriptClassInfo, new MainMethodReserved(), getTestName(), script, 
pickySettings, PAINLESS_LOOKUP, null); + Walker.buildPainlessTree(scriptClassInfo, new MainMethodReserved(), getTestName(), script, pickySettings, + scriptEngine.getContextsToLookups().get(PainlessTestScript.CONTEXT), null); } // test actual script execution PainlessTestScript.Factory factory = scriptEngine.compile(null, script, PainlessTestScript.CONTEXT, compileParams); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/StaticTest.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StaticTestObject.java similarity index 96% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/StaticTest.java rename to modules/lang-painless/src/test/java/org/elasticsearch/painless/StaticTestObject.java index 4a4f27b8f21..fc805317850 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/StaticTest.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StaticTestObject.java @@ -19,7 +19,7 @@ package org.elasticsearch.painless; -public class StaticTest { +public class StaticTestObject { public static int staticAddIntsTest(int x, int y) { return x + y; } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 08a5d0cb5fd..1b39b56286b 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -20,12 +20,12 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.CompilerSettings; -import org.elasticsearch.painless.FeatureTest; +import org.elasticsearch.painless.FeatureTestObject; import org.elasticsearch.painless.Locals.Variable; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Operation; -import org.elasticsearch.painless.action.PainlessExecuteAction.PainlessTestScript; import org.elasticsearch.painless.ScriptClassInfo; +import org.elasticsearch.painless.action.PainlessExecuteAction.PainlessTestScript; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.painless.lookup.PainlessCast; import org.elasticsearch.painless.lookup.PainlessClass; @@ -35,8 +35,10 @@ import org.elasticsearch.painless.lookup.PainlessLookupBuilder; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.PainlessMethod; import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistLoader; import org.elasticsearch.test.ESTestCase; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -49,7 +51,6 @@ import static org.elasticsearch.painless.node.SSource.MainMethodReserved; * Tests {@link Object#toString} implementations on all extensions of {@link ANode}. 
*/ public class NodeToStringTests extends ESTestCase { - private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); public void testEAssignment() { assertToString( @@ -379,10 +380,11 @@ public class NodeToStringTests extends ESTestCase { + "return a.length"); assertToString( "(SSource\n" - + " (SDeclBlock (SDeclaration org.elasticsearch.painless.FeatureTest a (ENewObj org.elasticsearch.painless.FeatureTest)))\n" + + " (SDeclBlock (SDeclaration org.elasticsearch.painless.FeatureTestObject a" + + " (ENewObj org.elasticsearch.painless.FeatureTestObject)))\n" + " (SExpression (EAssignment (PField (EVariable a) x) = (ENumeric 10)))\n" + " (SReturn (PField (EVariable a) x)))", - "org.elasticsearch.painless.FeatureTest a = new org.elasticsearch.painless.FeatureTest();\n" + "org.elasticsearch.painless.FeatureTestObject a = new org.elasticsearch.painless.FeatureTestObject();\n" + "a.x = 10;\n" + "return a.x"); } @@ -497,10 +499,10 @@ public class NodeToStringTests extends ESTestCase { public void testPSubShortcut() { Location l = new Location(getTestName(), 0); - PainlessClass s = painlessLookup.lookupPainlessClass(FeatureTest.class); + PainlessClass s = painlessLookup.lookupPainlessClass(FeatureTestObject.class); PainlessMethod getter = s.methods.get(PainlessLookupUtility.buildPainlessMethodKey("getX", 0)); PainlessMethod setter = s.methods.get(PainlessLookupUtility.buildPainlessMethodKey("setX", 1)); - PSubShortcut node = new PSubShortcut(l, "x", FeatureTest.class.getName(), getter, setter); + PSubShortcut node = new PSubShortcut(l, "x", FeatureTestObject.class.getName(), getter, setter); node.prefix = new EVariable(l, "a"); assertEquals("(PSubShortcut (EVariable a) x)", node.toString()); assertEquals("(PSubNullSafeCallInvoke (PSubShortcut (EVariable a) x))", @@ -892,6 +894,14 @@ public class NodeToStringTests extends ESTestCase { + "}"); } + private final PainlessLookup painlessLookup; + + public NodeToStringTests() { + List whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); + whitelists.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.painless.test")); + painlessLookup = PainlessLookupBuilder.buildFromWhitelists(whitelists); + } + private void assertToString(String expected, String code) { assertEquals(expected, walk(code).toString()); } diff --git a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.test b/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.test index 71a0c0240f7..37faf50a701 100644 --- a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.test +++ b/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.test @@ -2,7 +2,29 @@ class org.elasticsearch.painless.BindingsTests$BindingsTestScript { } +class org.elasticsearch.painless.FeatureTestObject no_import { + int z + () + (int,int) + int getX() + int getY() + Integer getI() + void setX(int) + void setY(int) + void setI(Integer) + boolean overloadedStatic() + boolean overloadedStatic(boolean) + int staticNumberTest(Number) + Double mixedAdd(int, Byte, char, Float) + Object twoFunctionsOfX(Function,Function) + void listInput(List) + int org.elasticsearch.painless.FeatureTestAugmentationObject getTotal() + int org.elasticsearch.painless.FeatureTestAugmentationObject addToTotal(int) +} + static_import { + int staticAddIntsTest(int, int) from_class 
org.elasticsearch.painless.StaticTestObject + float staticAddFloatsTest(float, float) from_class org.elasticsearch.painless.FeatureTestObject int addWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingsTests$BindingTestClass int addThisWithState(BindingsTests.BindingsTestScript, int, int, int, double) bound_to org.elasticsearch.painless.BindingsTests$ThisBindingTestClass int addEmptyThisWithState(BindingsTests.BindingsTestScript, int) bound_to org.elasticsearch.painless.BindingsTests$EmptyThisBindingTestClass From b0ce3e0a10559b2e29ebfcf122db90f223d10c32 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Fri, 5 Apr 2019 15:31:23 -0700 Subject: [PATCH 16/45] [Docs] Remove extraneous text (#40914) Removes text that was likely introduced by copy/paste error. --- docs/reference/upgrade.asciidoc | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/reference/upgrade.asciidoc b/docs/reference/upgrade.asciidoc index 0079d8bbecd..e5e447aff75 100644 --- a/docs/reference/upgrade.asciidoc +++ b/docs/reference/upgrade.asciidoc @@ -37,7 +37,6 @@ deprecation warnings are logged when the log level is set to `WARN`. to your code and configuration for {version}. . If you use custom plugins, make sure compatible versions are available. . Test upgrades in a dev environment before upgrading your production cluster. -before upgrading. . <> You must have a snapshot of your data to roll back to an earlier version. From a69ff8221f730a8d7a791c5136fbbea081a3ffd7 Mon Sep 17 00:00:00 2001 From: debadair Date: Fri, 5 Apr 2019 16:38:31 -0700 Subject: [PATCH 17/45] [DOCS] Added settings page for ILM. (#40880) * [DOCS] Added settings page for ILM. * [DOCS] Adding ILM settings file * [DOCS] Moved the ILM settings to a separate section * [DOCS] Linked to the rollover docs. * [DOCS] Tweaked the "required" wording. --- docs/reference/ilm/getting-started-ilm.asciidoc | 8 +++++++- docs/reference/index-modules.asciidoc | 10 ++++++++-- docs/reference/settings/ilm-settings.asciidoc | 15 +++++++++++++++ docs/reference/setup.asciidoc | 2 ++ 4 files changed, 32 insertions(+), 3 deletions(-) create mode 100644 docs/reference/settings/ilm-settings.asciidoc diff --git a/docs/reference/ilm/getting-started-ilm.asciidoc b/docs/reference/ilm/getting-started-ilm.asciidoc index f06c95f49c0..3d193572164 100644 --- a/docs/reference/ilm/getting-started-ilm.asciidoc +++ b/docs/reference/ilm/getting-started-ilm.asciidoc @@ -15,7 +15,9 @@ our writing index. We wish to roll over the index after it reaches a size of 50 gigabytes, or has been created 30 days ago, and then delete the index after 90 days. -=== Setting up a new policy +[float] +[[ilm-gs-create-policy]] +=== Setting up a policy There are many new features introduced by {ilm-init}, but we will only focus on a few that are needed for our example. For starters, we will use the @@ -64,6 +66,8 @@ the index being written to after it reaches 50 gigabytes, or it is 30 days old. The rollover will occur when either of these conditions is true. The index will be deleted 90 days after it is rolled over. +[float] +[[ilm-gs-apply-policy]] === Applying a policy to our index There are <> to associate a @@ -135,6 +139,8 @@ index being the index that is being written to at a time. Rollover swaps the write index to be the new index created from rollover, and sets the alias to be read-only for the source index. 
+[float] +[[ilm-gs-check-progress]] === Checking progress Now that we have an index managed by our policy, how do we tell what is going diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 6c6858125c5..7848a48fa58 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -184,8 +184,8 @@ specific index module: `index.blocks.write`:: - Set to `true` to disable data write operations against the index. Unlike `read_only`, - this setting does not affect metadata. For instance, you can close an index with a `write` + Set to `true` to disable data write operations against the index. Unlike `read_only`, + this setting does not affect metadata. For instance, you can close an index with a `write` block, but not an index with a `read_only` block. `index.blocks.metadata`:: @@ -285,6 +285,12 @@ Other index settings are available in index modules: Control over the transaction log and background flush operations. +[float] +=== [xpack]#{xpack} index settings# + +<>:: + + Specify the lifecycle policy and rollover alias for an index. -- include::index-modules/analysis.asciidoc[] diff --git a/docs/reference/settings/ilm-settings.asciidoc b/docs/reference/settings/ilm-settings.asciidoc new file mode 100644 index 00000000000..7de2036273b --- /dev/null +++ b/docs/reference/settings/ilm-settings.asciidoc @@ -0,0 +1,15 @@ +[role="xpack"] +[[ilm-settings]] +=== {ilm-cap} settings + +These index-level {ilm-init} settings are typically configured through index +templates. For more information, see <>. + +`index.lifecycle.name`:: +The name of the policy to use to manage the index. + +`index.lifecycle.rollover_alias`:: +The index alias to update when the index rolls over. Specify when using a +policy that contains a rollover action. When the index rolls over, the alias is +updated to reflect that the index is no longer the write index. For more +information about rollover, see <>. diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index d54941ed6aa..d2d46670809 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -52,6 +52,8 @@ include::settings/audit-settings.asciidoc[] include::settings/ccr-settings.asciidoc[] +include::settings/ilm-settings.asciidoc[] + include::settings/license-settings.asciidoc[] include::settings/ml-settings.asciidoc[] From e44e84ab42e1daa8ca0a31997e73a9554dc317e7 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 6 Apr 2019 10:17:29 -0400 Subject: [PATCH 18/45] Suppress lease background sync failures if stopping (#40902) If the transport service is stopped, likely because we are shutting down, and a retention lease background sync fires, the logs will display a warn message and stacktrace. Yet, this situation is harmless and can happen as a normal course of business when shutting down. This commit suppresses the log messages in this case.
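In practice the change boils down to classifying the failure before logging it: a failure caused by a stopped transport service or an already-closed shard is expected during shutdown and is dropped silently, while anything else still produces the warning. A minimal sketch of that decision, using the isTransportStoppedForAction helper added below; the shouldWarnOnSyncFailure wrapper itself is illustrative only, not part of the patch:

    import org.apache.lucene.store.AlreadyClosedException;
    import org.elasticsearch.ExceptionsHelper;
    import org.elasticsearch.index.shard.IndexShardClosedException;

    final class RetentionLeaseSyncFailures {
        // Returns true only for failures that deserve a WARN log; expected
        // shutdown/close failures are swallowed by the caller.
        static boolean shouldWarnOnSyncFailure(final Exception e, final String actionName) {
            if (ExceptionsHelper.isTransportStoppedForAction(e, actionName + "[p]")) {
                return false; // the transport service is stopping, we are likely shutting down
            }
            if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) != null) {
                return false; // the shard has already been closed, also harmless
            }
            return true;
        }
    }

The "[p]" suffix here matches the primary-phase transport action name that the background sync uses, which is why it is appended before the check.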
--- .../main/java/org/elasticsearch/ExceptionsHelper.java | 9 +++++++++ .../support/replication/ReplicationOperation.java | 8 +++----- .../seqno/RetentionLeaseBackgroundSyncAction.java | 10 ++++++++-- .../org/elasticsearch/transport/TransportService.java | 10 ++++++++-- .../seqno/RetentionLeaseBackgroundSyncActionTests.java | 7 ++++++- 5 files changed, 34 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index e0525127ee7..e4269a375dd 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.index.Index; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.TransportException; import java.io.IOException; import java.io.PrintWriter; @@ -193,6 +194,14 @@ public final class ExceptionsHelper { return null; } + public static boolean isTransportStoppedForAction(final Throwable t, final String action) { + final TransportException maybeTransport = + (TransportException) ExceptionsHelper.unwrap(t, TransportException.class); + return maybeTransport != null + && (maybeTransport.getMessage().equals("TransportService is closed stopped can't send request") + || maybeTransport.getMessage().equals("transport stopped, action: " + action)); + } + /** * Throws the specified exception. If null if specified then true is returned. */ diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 7fdb613c38b..22e90cfc135 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.transport.TransportException; import java.io.IOException; import java.util.ArrayList; @@ -205,10 +204,9 @@ public class ReplicationOperation< private void onNoLongerPrimary(Exception failure) { final Throwable cause = ExceptionsHelper.unwrapCause(failure); - final boolean nodeIsClosing = cause instanceof NodeClosedException - || (cause instanceof TransportException && - ("TransportService is closed stopped can't send request".equals(cause.getMessage()) - || "transport stopped, action: internal:cluster/shard/failure".equals(cause.getMessage()))); + final boolean nodeIsClosing = + cause instanceof NodeClosedException + || ExceptionsHelper.isTransportStoppedForAction(cause, "internal:cluster/shard/failure"); final String message; if (nodeIsClosing) { message = String.format(Locale.ROOT, diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index 918ce664aea..570159cc74d 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -113,9 +113,15 @@ public class 
RetentionLeaseBackgroundSyncAction extends TransportReplicationActi ActionListener.wrap( r -> {}, e -> { - if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) == null) { - getLogger().warn(new ParameterizedMessage("{} retention lease background sync failed", shardId), e); + if (ExceptionsHelper.isTransportStoppedForAction(e, ACTION_NAME + "[p]")) { + // we are likely shutting down + return; } + if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) != null) { + // the shard is closed + return; + } + getLogger().warn(new ParameterizedMessage("{} retention lease background sync failed", shardId), e); })); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 1288f6fe16f..c8493edc979 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -274,6 +274,7 @@ public class TransportService extends AbstractLifecycleComponent implements Tran } @Override public void doRun() { + // cf. ExceptionsHelper#isTransportStoppedForAction TransportException ex = new TransportException("transport stopped, action: " + holderToNotify.action()); holderToNotify.handler().handleException(ex); } @@ -626,8 +627,13 @@ public class TransportService extends AbstractLifecycleComponent implements Tran } try { if (lifecycle.stoppedOrClosed()) { - // if we are not started the exception handling will remove the RequestHolder again and calls the handler to notify - // the caller. It will only notify if the toStop code hasn't done the work yet. + /* + * If we are not started the exception handling will remove the request holder again and calls the handler to notify the + * caller. It will only notify if toStop hasn't done the work yet. + * + * Do not edit this exception message, it is currently relied upon in production code! + */ + // TODO: make a dedicated exception for a stopped transport service? cf. 
ExceptionsHelper#isTransportStoppedForAction throw new TransportException("TransportService is closed stopped can't send request"); } if (timeoutHandler != null) { diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index 6ad7d5039ae..81ea56c6096 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportService; import org.mockito.ArgumentCaptor; @@ -204,9 +205,13 @@ public class RetentionLeaseBackgroundSyncActionTests extends ESTestCase { final Exception e = randomFrom( new AlreadyClosedException("closed"), new IndexShardClosedException(indexShard.shardId()), + new TransportException(randomFrom( + "failed", + "TransportService is closed stopped can't send request", + "transport stopped, action: indices:admin/seq_no/retention_lease_background_sync[p]")), new RuntimeException("failed")); listener.onFailure(e); - if (e instanceof AlreadyClosedException == false && e instanceof IndexShardClosedException == false) { + if (e.getMessage().equals("failed")) { final ArgumentCaptor captor = ArgumentCaptor.forClass(ParameterizedMessage.class); verify(retentionLeaseSyncActionLogger).warn(captor.capture(), same(e)); final ParameterizedMessage message = captor.getValue(); From 690039914412e8c04ea86d775554267635c15bb5 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 6 Apr 2019 17:23:51 -0400 Subject: [PATCH 19/45] Be lenient when parsing build flavor and type on the wire (#40734) Today we are strict when parsing build flavor and types off the wire. This means that if a later version introduces a new build flavor or type, an older version would not be able to parse what that new version is sending. For a practical example of this, we recently added the build type "docker", and this means that in a rolling upgrade scenario older nodes would not be able to understand the build type that the newer node is sending. This breaks clusters and is bad. We do not normally think of adding a new enumeration value as being a serialization breaking change, it is just not a lesson that we have learned before. We should be lenient here though, so that we can add future changes without running the risk of breaking ourselves horribly. It is either that, or we have super-strict testing infrastructure here yet still I fear the possibility of mistakes. This commit changes the parsing of build flavor and build type so that we are still strict at startup, yet we are lenient with values coming across the wire. This will help avoid us breaking rolling upgrades, or clients that are on an older version. 
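Stripped of the Build-specific details, the parsing pattern this commit describes looks roughly like the following simplified sketch; it is not the full Build.Flavor/Build.Type implementation shown in the diff below:

    // Strict parsing is used at startup, where an unrecognised value really does
    // mean a broken distribution; lenient parsing is used for values read off the
    // wire, where a newer node may legitimately send a value this node has never
    // heard of, and the safe answer is UNKNOWN.
    enum Flavor {
        DEFAULT, OSS, UNKNOWN;

        static Flavor fromDisplayName(final String displayName, final boolean strict) {
            switch (displayName) {
                case "default":
                    return DEFAULT;
                case "oss":
                    return OSS;
                case "unknown":
                    return UNKNOWN;
                default:
                    if (strict) {
                        throw new IllegalStateException(
                            "unexpected distribution flavor [" + displayName + "]; your distribution is broken");
                    }
                    return UNKNOWN;
            }
        }
    }

The same strict flag is threaded through the build type parsing, so the startup scripts keep their hard failure while wire and REST deserialization fall back to UNKNOWN.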
--- .../main/java/org/elasticsearch/Build.java | 29 +++++++++---- .../action/main/MainResponse.java | 8 +++- .../java/org/elasticsearch/BuildTests.java | 42 +++++++++++++++++++ 3 files changed, 69 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index be37c56837d..1b1cd8d3e72 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -57,7 +57,7 @@ public class Build { return displayName; } - public static Flavor fromDisplayName(final String displayName) { + public static Flavor fromDisplayName(final String displayName, final boolean strict) { switch (displayName) { case "default": return Flavor.DEFAULT; @@ -66,7 +66,12 @@ public class Build { case "unknown": return Flavor.UNKNOWN; default: - throw new IllegalStateException("unexpected distribution flavor [" + displayName + "]; your distribution is broken"); + if (strict) { + final String message = "unexpected distribution flavor [" + displayName + "]; your distribution is broken"; + throw new IllegalStateException(message); + } else { + return Flavor.UNKNOWN; + } } } @@ -91,7 +96,7 @@ public class Build { this.displayName = displayName; } - public static Type fromDisplayName(final String displayName) { + public static Type fromDisplayName(final String displayName, final boolean strict) { switch (displayName) { case "deb": return Type.DEB; @@ -106,9 +111,14 @@ public class Build { case "unknown": return Type.UNKNOWN; default: - throw new IllegalStateException("unexpected distribution type [" + displayName + "]; your distribution is broken"); + if (strict) { + throw new IllegalStateException("unexpected distribution type [" + displayName + "]; your distribution is broken"); + } else { + return Type.UNKNOWN; + } } } + } static { @@ -119,8 +129,9 @@ public class Build { final boolean isSnapshot; final String version; - flavor = Flavor.fromDisplayName(System.getProperty("es.distribution.flavor", "unknown")); - type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown")); + // these are parsed at startup, and we require that we are able to recognize the values passed in by the startup scripts + flavor = Flavor.fromDisplayName(System.getProperty("es.distribution.flavor", "unknown"), true); + type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown"), true); final String esPrefix = "elasticsearch-" + Version.CURRENT; final URL url = getElasticsearchCodeSourceLocation(); @@ -214,12 +225,14 @@ public class Build { final Flavor flavor; final Type type; if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - flavor = Flavor.fromDisplayName(in.readString()); + // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know + flavor = Flavor.fromDisplayName(in.readString(), false); } else { flavor = Flavor.OSS; } if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - type = Type.fromDisplayName(in.readString()); + // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know + type = Type.fromDisplayName(in.readString(), false); } else { type = Type.UNKNOWN; } diff --git a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java index 38d78fdc0c1..8b0e5c744e5 100644 --- a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java +++ 
b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java @@ -135,8 +135,12 @@ public class MainResponse extends ActionResponse implements ToXContentObject { final String buildType = (String) value.get("build_type"); response.build = new Build( - buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor), - buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType), + /* + * Be lenient when reading on the wire, the enumeration values from other versions might be different than what + * we know. + */ + buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor, false), + buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType, false), (String) value.get("build_hash"), (String) value.get("build_date"), (boolean) value.get("build_snapshot"), diff --git a/server/src/test/java/org/elasticsearch/BuildTests.java b/server/src/test/java/org/elasticsearch/BuildTests.java index f1d48c08b39..e0d8140c708 100644 --- a/server/src/test/java/org/elasticsearch/BuildTests.java +++ b/server/src/test/java/org/elasticsearch/BuildTests.java @@ -35,7 +35,10 @@ import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.sameInstance; public class BuildTests extends ESTestCase { @@ -223,4 +226,43 @@ public class BuildTests extends ESTestCase { assertThat(post67pre70.build.getQualifiedVersion(), equalTo(post67Pre70Version.toString())); assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); } + + public void testFlavorParsing() { + for (final Build.Flavor flavor : Build.Flavor.values()) { + // strict or not should not impact parsing at all here + assertThat(Build.Flavor.fromDisplayName(flavor.displayName(), randomBoolean()), sameInstance(flavor)); + } + } + + public void testTypeParsing() { + for (final Build.Type type : Build.Type.values()) { + // strict or not should not impact parsing at all here + assertThat(Build.Type.fromDisplayName(type.displayName(), randomBoolean()), sameInstance(type)); + } + } + + public void testLenientFlavorParsing() { + final String displayName = randomAlphaOfLength(8); + assertThat(Build.Flavor.fromDisplayName(displayName, false), equalTo(Build.Flavor.UNKNOWN)); + } + + public void testStrictFlavorParsing() { + final String displayName = randomAlphaOfLength(8); + @SuppressWarnings("ResultOfMethodCallIgnored") final IllegalStateException e = + expectThrows(IllegalStateException.class, () -> Build.Flavor.fromDisplayName(displayName, true)); + assertThat(e, hasToString(containsString("unexpected distribution flavor [" + displayName + "]; your distribution is broken"))); + } + + public void testLenientTypeParsing() { + final String displayName = randomAlphaOfLength(8); + assertThat(Build.Type.fromDisplayName(displayName, false), equalTo(Build.Type.UNKNOWN)); + } + + public void testStrictTypeParsing() { + final String displayName = randomAlphaOfLength(8); + @SuppressWarnings("ResultOfMethodCallIgnored") final IllegalStateException e = + expectThrows(IllegalStateException.class, () -> Build.Type.fromDisplayName(displayName, true)); + assertThat(e, hasToString(containsString("unexpected distribution type [" + displayName + "]; your distribution is broken"))); + } + } From 4163e5976895e4a6d1f31de031ca8cbe367efd04 Mon Sep 17 00:00:00 2001 
From: Jason Tedor Date: Sun, 7 Apr 2019 10:16:35 -0400 Subject: [PATCH 20/45] Mute failing IndexShard local history test This test fails reliably, so this commit mutes that test until a fix is available. --- .../test/java/org/elasticsearch/index/shard/IndexShardTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index da474e8d770..bf2499c6d1e 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1090,6 +1090,7 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(replicaShard, primaryShard); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40929") public void testRestoreLocalHistoryFromTranslogOnPromotion() throws IOException, InterruptedException { final IndexShard indexShard = newStartedShard(false); final int operations = 1024 - scaledRandomIntBetween(0, 1024); From d5fcbf2f4a824a0c221760ebb655b67fe38ad679 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Sun, 7 Apr 2019 21:43:35 +0200 Subject: [PATCH 21/45] refactor onStart and onFinish to take runnables and execute them guarded by state (#40855) refactor onStart and onFinish to take action listeners and execute them when the indexer is in indexing state. --- .../core/indexing/AsyncTwoPhaseIndexer.java | 42 ++++++++------ .../indexing/AsyncTwoPhaseIndexerTests.java | 25 ++++---- .../transforms/DataFrameIndexer.java | 13 +++-- .../transforms/DataFrameTransformTask.java | 15 +++-- .../xpack/rollup/job/RollupIndexer.java | 40 +++++++++---- .../xpack/rollup/job/RollupJobTask.java | 3 +- .../job/RollupIndexerIndexingTests.java | 3 +- .../rollup/job/RollupIndexerStateTests.java | 58 ++++++++++++++----- 8 files changed, 131 insertions(+), 68 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java index df8eeb71e61..e859e0db754 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -22,7 +22,7 @@ import java.util.concurrent.atomic.AtomicReference; /** * An abstract class that builds an index incrementally. A background job can be launched using {@link #maybeTriggerAsyncJob(long)}, * it will create the index from the source index up to the last complete bucket that is allowed to be built (based on job position). - * Only one background job can run simultaneously and {@link #onFinish()} is called when the job + * Only one background job can run simultaneously and {@link #onFinish} is called when the job * finishes. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is * aborted while a job is running. The indexer must be started ({@link #start()} to allow a background job to run when * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer.
@@ -85,13 +85,10 @@ public abstract class AsyncTwoPhaseIndexer { @@ -148,17 +145,17 @@ public abstract class AsyncTwoPhaseIndexer { - try { + onStart(now, ActionListener.wrap(r -> { stats.markStartSearch(); doNextSearch(buildSearchRequest(), ActionListener.wrap(this::onSearchResponse, this::finishWithSearchFailure)); - } catch (Exception e) { - finishWithSearchFailure(e); - } + }, e -> { + finishAndSetState(); + onFailure(e); + })); }); logger.debug("Beginning to index [" + getJobId() + "], state: [" + currentState + "]"); return true; @@ -200,8 +197,9 @@ public abstract class AsyncTwoPhaseIndexer listener); /** * Executes the {@link SearchRequest} and calls nextPhase with the @@ -248,9 +246,12 @@ public abstract class AsyncTwoPhaseIndexer listener); /** * Called when a background job detects that the indexer is aborted causing the @@ -315,10 +316,11 @@ public abstract class AsyncTwoPhaseIndexer doSaveState(finishAndSetState(), position.get(), () -> {}), + e -> doSaveState(finishAndSetState(), position.get(), () -> {}))); + return; } @@ -337,6 +339,8 @@ public abstract class AsyncTwoPhaseIndexer listener) { assertThat(step, equalTo(0)); ++step; + listener.onResponse(null); } @Override @@ -98,7 +99,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { @Override protected void doSaveState(IndexerState state, Integer position, Runnable next) { - assertThat(step, equalTo(4)); + assertThat(step, equalTo(5)); ++step; next.run(); } @@ -109,10 +110,11 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { } @Override - protected void onFinish() { - assertThat(step, equalTo(5)); + protected void onFinish(ActionListener listener) { + assertThat(step, equalTo(4)); ++step; isFinished.set(true); + listener.onResponse(null); } @Override @@ -153,9 +155,10 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { } @Override - protected void onStartJob(long now) { + protected void onStart(long now, ActionListener listener) { assertThat(step, equalTo(0)); ++step; + listener.onResponse(null); } @Override @@ -170,20 +173,18 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { @Override protected void doSaveState(IndexerState state, Integer position, Runnable next) { - assertThat(step, equalTo(2)); - ++step; - next.run(); + fail("should not be called"); } @Override protected void onFailure(Exception exc) { - assertThat(step, equalTo(3)); + assertThat(step, equalTo(2)); ++step; isFinished.set(true); } @Override - protected void onFinish() { + protected void onFinish(ActionListener listener) { fail("should not be called"); } @@ -240,8 +241,8 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); - assertTrue(ESTestCase.awaitBusy(() -> isFinished.get())); - assertThat(indexer.getStep(), equalTo(4)); + assertTrue(ESTestCase.awaitBusy(() -> isFinished.get(), 10000, TimeUnit.SECONDS)); + assertThat(indexer.getStep(), equalTo(3)); } finally { executor.shutdownNow(); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java index 090a9c9cfcc..238e531bba9 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java +++ 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.dataframe.transforms; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -51,10 +52,14 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer getFieldMappings(); @Override - protected void onStartJob(long now) { - QueryBuilder queryBuilder = getConfig().getSource().getQueryConfig().getQuery(); - - pivot = new Pivot(getConfig().getSource().getIndex(), queryBuilder, getConfig().getPivotConfig()); + protected void onStart(long now, ActionListener listener) { + try { + QueryBuilder queryBuilder = getConfig().getSource().getQueryConfig().getQuery(); + pivot = new Pivot(getConfig().getSource().getIndex(), queryBuilder, getConfig().getPivotConfig()); + listener.onResponse(null); + } catch (Exception e) { + listener.onFailure(e); + } } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 23884afec33..a4f3df8cbe5 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -27,12 +27,12 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.common.notifications.Auditor; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.notifications.DataFrameAuditMessage; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction.Response; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; +import org.elasticsearch.xpack.core.dataframe.notifications.DataFrameAuditMessage; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; @@ -478,9 +478,14 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } @Override - protected void onFinish() { - auditor.info(transform.getId(), "Finished indexing for data frame transform"); - logger.info("Finished indexing for data frame transform [" + transform.getId() + "]"); + protected void onFinish(ActionListener listener) { + try { + auditor.info(transform.getId(), "Finished indexing for data frame transform"); + logger.info("Finished indexing for data frame transform [" + transform.getId() + "]"); + listener.onResponse(null); + } catch (Exception e) { + listener.onFailure(e); + } } @Override diff --git 
a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index 1d5f9093a29..e051e912c48 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.rollup.job; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.unit.TimeValue; @@ -75,7 +76,21 @@ public abstract class RollupIndexer extends AsyncTwoPhaseIndexer initialState, Map initialPosition, AtomicBoolean upgradedDocumentID) { - super(executor, initialState, initialPosition, new RollupIndexerJobStats()); + this(executor, job, initialState, initialPosition, upgradedDocumentID, new RollupIndexerJobStats()); + } + + /** + * Ctr + * @param executor Executor to use to fire the first request of a background job. + * @param job The rollup job + * @param initialState Initial state for the indexer + * @param initialPosition The last indexed bucket of the task + * @param upgradedDocumentID whether job has updated IDs (for BWC) + * @param jobStats jobstats instance for collecting stats + */ + RollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, Map initialPosition, + AtomicBoolean upgradedDocumentID, RollupIndexerJobStats jobStats) { + super(executor, initialState, initialPosition, jobStats); this.job = job; this.compositeBuilder = createCompositeBuilder(job.getConfig()); this.upgradedDocumentID = upgradedDocumentID; @@ -94,15 +109,20 @@ public abstract class RollupIndexer extends AsyncTwoPhaseIndexer listener) { + try { + // this is needed to exclude buckets that can still receive new documents. + DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); + long rounded = dateHisto.createRounding().round(now); + if (dateHisto.getDelay() != null) { + // if the job has a delay we filter all documents that appear before it. 
+ maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); + } else { + maxBoundary = rounded; + } + listener.onResponse(null); + } catch (Exception e) { + listener.onFailure(e); } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index f545ab049d4..fecda3a2ce2 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -138,8 +138,9 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE } @Override - protected void onFinish() { + protected void onFinish(ActionListener listener) { logger.debug("Finished indexing for job [" + job.getConfig().getId() + "]"); + listener.onResponse(null); } @Override diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index cdabb36d427..18365c2b485 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -580,8 +580,9 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { } @Override - protected void onFinish() { + protected void onFinish(ActionListener listener) { latch.countDown(); + listener.onResponse(null); } @Override diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index 37b3fd84ef1..b529a87027e 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; -import org.elasticsearch.xpack.core.rollup.job.GroupConfig; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; import org.mockito.stubbing.Answer; @@ -44,11 +44,18 @@ import java.util.function.Function; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.spy; + public class RollupIndexerStateTests extends ESTestCase { private static class EmptyRollupIndexer extends RollupIndexer { + EmptyRollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, + Map initialPosition, boolean upgraded, RollupIndexerJobStats stats) { + super(executor, job, initialState, initialPosition, new AtomicBoolean(upgraded), stats); + } + EmptyRollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, Map initialPosition, boolean upgraded) { super(executor, job, initialState, initialPosition, new 
AtomicBoolean(upgraded)); @@ -124,7 +131,9 @@ public class RollupIndexerStateTests extends ESTestCase { } @Override - protected void onFinish() {} + protected void onFinish(ActionListener listener) { + listener.onResponse(null); + } } private static class DelayedEmptyRollupIndexer extends EmptyRollupIndexer { @@ -140,6 +149,11 @@ public class RollupIndexerStateTests extends ESTestCase { super(executor, job, initialState, initialPosition, randomBoolean()); } + DelayedEmptyRollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, + Map initialPosition, RollupIndexerJobStats stats) { + super(executor, job, initialState, initialPosition, randomBoolean(), stats); + } + private CountDownLatch newLatch() { return latch = new CountDownLatch(1); } @@ -214,7 +228,9 @@ public class RollupIndexerStateTests extends ESTestCase { } @Override - protected void onFinish() {} + protected void onFinish(ActionListener listener) { + listener.onResponse(null); + } } public void testStarted() throws Exception { @@ -248,9 +264,11 @@ public class RollupIndexerStateTests extends ESTestCase { AtomicBoolean isFinished = new AtomicBoolean(false); DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(executor, job, state, null) { @Override - protected void onFinish() { - super.onFinish(); - isFinished.set(true); + protected void onFinish(ActionListener listener) { + super.onFinish(ActionListener.wrap(r -> { + isFinished.set(true); + listener.onResponse(r); + }, listener::onFailure)); } }; final CountDownLatch latch = indexer.newLatch(); @@ -274,24 +292,32 @@ public class RollupIndexerStateTests extends ESTestCase { public void testStateChangeMidTrigger() throws Exception { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + + RollupIndexerJobStats stats = new RollupIndexerJobStats(); + RollupIndexerJobStats spyStats = spy(stats); RollupJobConfig config = mock(RollupJobConfig.class); - // We pull the config before a final state check, so this allows us to flip the state + // We call stats before a final state check, so this allows us to flip the state // and make sure the appropriate error is thrown - when(config.getGroupConfig()).then((Answer) invocationOnMock -> { + Answer forwardAndChangeState = invocation -> { + invocation.callRealMethod(); state.set(IndexerState.STOPPED); - return ConfigTestHelpers.randomGroupConfig(random()); - }); + return null; + }; + + doAnswer(forwardAndChangeState).when(spyStats).incrementNumInvocations(1L); RollupJob job = new RollupJob(config, Collections.emptyMap()); final ExecutorService executor = Executors.newFixedThreadPool(1); try { AtomicBoolean isFinished = new AtomicBoolean(false); - DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(executor, job, state, null) { + DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(executor, job, state, null, spyStats) { @Override - protected void onFinish() { - super.onFinish(); - isFinished.set(true); + protected void onFinish(ActionListener listener) { + super.onFinish(ActionListener.wrap(r -> { + isFinished.set(true); + listener.onResponse(r); + }, listener::onFailure)); } }; final CountDownLatch latch = indexer.newLatch(); @@ -318,7 +344,7 @@ public class RollupIndexerStateTests extends ESTestCase { try { EmptyRollupIndexer indexer = new EmptyRollupIndexer(executor, job, state, null) { @Override - protected void onFinish() { + protected void onFinish(ActionListener listener) { fail("Should not have called onFinish"); } From 26c63e011502c929c32939b099a968c47d37416b 
Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Mon, 8 Apr 2019 13:23:12 +1000 Subject: [PATCH 22/45] Add test for HTTP and Transport TLS on basic license (#40932) This adds a new security/qa test for TLS on a basic license. It starts a 2 node cluster with a basic license, and TLS enabled on both HTTP and Transport, and verifies the license type, x-pack SSL usage and SSL certificates API. It also upgrades the cluster to a trial license and performs that same set of checks (to ensure that clusters with basic license and TLS enabled can be upgraded to a higher feature license) Backport of: #40714 --- x-pack/plugin/security/build.gradle | 10 ++ x-pack/plugin/security/qa/build.gradle | 18 +++ .../plugin/security/qa/tls-basic/build.gradle | 48 +++++++ .../xpack/security/TlsWithBasicLicenseIT.java | 122 ++++++++++++++++++ .../src/test/resources/ssl/README.asciidoc | 48 +++++++ .../tls-basic/src/test/resources/ssl/ca.crt | 20 +++ .../tls-basic/src/test/resources/ssl/ca.key | 30 +++++ .../tls-basic/src/test/resources/ssl/ca.p12 | Bin 0 -> 1130 bytes .../tls-basic/src/test/resources/ssl/http.crt | 22 ++++ .../tls-basic/src/test/resources/ssl/http.key | 30 +++++ .../src/test/resources/ssl/transport.crt | 22 ++++ .../src/test/resources/ssl/transport.key | 30 +++++ 12 files changed, 400 insertions(+) create mode 100644 x-pack/plugin/security/qa/build.gradle create mode 100644 x-pack/plugin/security/qa/tls-basic/build.gradle create mode 100644 x-pack/plugin/security/qa/tls-basic/src/test/java/org/elasticsearch/xpack/security/TlsWithBasicLicenseIT.java create mode 100644 x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/README.asciidoc create mode 100644 x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.crt create mode 100644 x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.key create mode 100644 x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.p12 create mode 100644 x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.crt create mode 100644 x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.key create mode 100644 x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.crt create mode 100644 x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.key diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index d60dbf8e1b4..7608543fb8a 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -298,3 +298,13 @@ unitTest { // installing them as individual plugins for integ tests doesn't make sense, // so we disable integ tests integTest.enabled = false + +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} + diff --git a/x-pack/plugin/security/qa/build.gradle b/x-pack/plugin/security/qa/build.gradle new file mode 100644 index 00000000000..f2f60527ec4 --- /dev/null +++ b/x-pack/plugin/security/qa/build.gradle @@ -0,0 +1,18 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.build' +unitTest.enabled = false + +dependencies { + compile project(':test:framework') +} + +subprojects { + project.tasks.withType(RestIntegTestTask) { + final File xPackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') + project.copyRestSpec.from(xPackResources) { + include 'rest-api-spec/api/**' + } + } +} + 
diff --git a/x-pack/plugin/security/qa/tls-basic/build.gradle b/x-pack/plugin/security/qa/tls-basic/build.gradle new file mode 100644 index 00000000000..9f5ef26f6e6 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/build.gradle @@ -0,0 +1,48 @@ +import org.elasticsearch.gradle.http.WaitForHttpResource + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +forbiddenPatterns { + exclude '**/*.key' + exclude '**/*.p12' +} + +File caFile = project.file('src/test/resources/ssl/ca.crt') + +integTestCluster { + numNodes=2 + + extraConfigFile 'http.key', project.projectDir.toPath().resolve('src/test/resources/ssl/http.key') + extraConfigFile 'http.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/http.crt') + extraConfigFile 'transport.key', project.projectDir.toPath().resolve('src/test/resources/ssl/transport.key') + extraConfigFile 'transport.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/transport.crt') + extraConfigFile 'ca.crt', caFile + + setting 'xpack.ilm.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'xpack.security.http.ssl.enabled', 'true' + setting 'xpack.security.http.ssl.certificate', 'http.crt' + setting 'xpack.security.http.ssl.key', 'http.key' + setting 'xpack.security.http.ssl.key_passphrase', 'http-password' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.transport.ssl.certificate', 'transport.crt' + setting 'xpack.security.transport.ssl.key', 'transport.key' + setting 'xpack.security.transport.ssl.key_passphrase', 'transport-password' + setting 'xpack.security.transport.ssl.certificate_authorities', 'ca.crt' + + waitCondition = { node, ant -> + WaitForHttpResource http = new WaitForHttpResource("https", node.httpUri(), numNodes) + http.setCertificateAuthorities(caFile) + return http.wait(5000) + } +} + diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/java/org/elasticsearch/xpack/security/TlsWithBasicLicenseIT.java b/x-pack/plugin/security/qa/tls-basic/src/test/java/org/elasticsearch/xpack/security/TlsWithBasicLicenseIT.java new file mode 100644 index 00000000000..3b2aea4a08d --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/java/org/elasticsearch/xpack/security/TlsWithBasicLicenseIT.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URL; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.notNullValue; + +public class TlsWithBasicLicenseIT extends ESRestTestCase { + private static Path httpTrustStore; + + @BeforeClass + public static void findTrustStore() throws Exception { + final URL resource = TlsWithBasicLicenseIT.class.getResource("/ssl/ca.p12"); + if (resource == null) { + throw new FileNotFoundException("Cannot find classpath resource /ssl/ca.p12"); + } + httpTrustStore = PathUtils.get(resource.toURI()); + } + + @AfterClass + public static void cleanupStatics() { + httpTrustStore = null; + } + + @Override + protected String getProtocol() { + return "https"; + } + + @Override + protected Settings restClientSettings() { + return Settings.builder() + .put(TRUSTSTORE_PATH, httpTrustStore) + .put(TRUSTSTORE_PASSWORD, "password") + .build(); + } + + public void testWithBasicLicense() throws Exception { + checkLicenseType("basic"); + checkSSLEnabled(); + checkCertificateAPI(); + } + + public void testWithTrialLicense() throws Exception { + startTrial(); + try { + checkLicenseType("trial"); + checkSSLEnabled(); + checkCertificateAPI(); + } finally { + revertTrial(); + } + } + + private void startTrial() throws IOException { + Response response = client().performRequest(new Request("POST", "/_license/start_trial?acknowledge=true")); + assertOK(response); + } + + private void revertTrial() throws IOException { + client().performRequest(new Request("POST", "/_license/start_basic?acknowledge=true")); + } + + private void checkLicenseType(String type) throws IOException { + Map license = getAsMap("/_license"); + assertThat(license, notNullValue()); + assertThat(ObjectPath.evaluate(license, "license.type"), equalTo(type)); + } + + private void checkSSLEnabled() throws IOException { + Map usage = getAsMap("/_xpack/usage"); + assertThat(usage, notNullValue()); + assertThat(ObjectPath.evaluate(usage, "security.ssl.http.enabled"), equalTo(true)); + assertThat(ObjectPath.evaluate(usage, "security.ssl.transport.enabled"), equalTo(true)); + } + + private void checkCertificateAPI() throws IOException { + Response response = client().performRequest(new Request("GET", "/_ssl/certificates")); + ObjectPath path = ObjectPath.createFromResponse(response); + final Object body = path.evaluate(""); + assertThat(body, instanceOf(List.class)); + final List certs = (List) body; + assertThat(certs, iterableWithSize(3)); + final List> certInfo = new ArrayList<>(); + for (int i = 0; i < certs.size(); i++) { + final Object element = certs.get(i); + assertThat(element, instanceOf(Map.class)); + final Map map = (Map) element; + certInfo.add(map); + assertThat(map.get("format"), equalTo("PEM")); + } + List paths = certInfo.stream().map(m -> 
String.valueOf(m.get("path"))).collect(Collectors.toList()); + assertThat(paths, containsInAnyOrder("http.crt", "transport.crt", "ca.crt")); + } + + +} + diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/README.asciidoc b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/README.asciidoc new file mode 100644 index 00000000000..9ff94bf0786 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/README.asciidoc @@ -0,0 +1,48 @@ += Keystore Details +This document details the steps used to create the certificate and keystore files in this directory. + +== Instructions on generating certificates +The certificates in this directory have been generated using elasticsearch-certutil (7.0.0 SNAPSHOT) + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil ca --pem --out=ca.zip --pass="ca-password" --days=3500 +unzip ca.zip +mv ca/ca.* ./ + +rm ca.zip +rmdir ca +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil cert --pem --name=http --out=http.zip --pass="http-password" --days=3500 \ + --ca-cert=ca.crt --ca-key=ca.key --ca-pass="ca-password" \ + --dns=localhost --dns=localhost.localdomain --dns=localhost4 --dns=localhost4.localdomain4 --dns=localhost6 --dns=localhost6.localdomain6 \ + --ip=127.0.0.1 --ip=0:0:0:0:0:0:0:1 + +unzip http.zip +mv http/http.* ./ + +rm http.zip +rmdir http +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil cert --pem --name=transport --out=transport.zip --pass="transport-password" --days=3500 \ + --ca-cert=ca.crt --ca-key=ca.key --ca-pass="ca-password" \ + --dns=localhost --dns=localhost.localdomain --dns=localhost4 --dns=localhost4.localdomain4 --dns=localhost6 --dns=localhost6.localdomain6 \ + --ip=127.0.0.1 --ip=0:0:0:0:0:0:0:1 + +unzip transport.zip +mv transport/transport.* ./ + +rm transport.zip +rmdir transport +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +keytool -importcert -file ca.crt -keystore ca.p12 -storetype PKCS12 -storepass "password" -alias ca +----------------------------------------------------------------------------------------------------------- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.crt b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.crt new file mode 100644 index 00000000000..5bcb6f77bc2 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSTCCAjGgAwIBAgIUNsCMQBpQB3zJAC1iERdc7yADVw0wDQYJKoZIhvcNAQEL +BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l +cmF0ZWQgQ0EwHhcNMTkwMzI5MDUxMjEyWhcNMjgxMDI3MDUxMjEyWjA0MTIwMAYD +VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMJL4SrJJsQpKFuHsNnWwzM9 +2Cnmsc7WzGEskV0ncSUloMxUZaZ8CJ2iuubN6KPe75ke8SS9vlNG3MEWRBVSPY4H 
+EJNcyiiI1w9c/yom6Kfvep1RvvRHlp+k/bDPzzuj4B8Dyg66TVYKRm+9uRWAUvZr +djhFB3cawbM1jD9ZaBLM4Qbdg0AlMqXWpkLPVtkD8lREPkAIhYxKx7TYqB1SbMg5 +ejfoRGF5qfl4luegWRlQKkOBCcJPZamcccNjDq9eXQm3vrp0/QEp0ODG14wU3B9R +G+2/yhh5KP3WWK/uksAmEv8YzG7UaCLNJRk/FuPz8uoSGLPM1e+2HWXsR9OnlF8C +AwEAAaNTMFEwHQYDVR0OBBYEFL+GbWzP3nPfx+OqvW5CYCqHN8ZlMB8GA1UdIwQY +MBaAFL+GbWzP3nPfx+OqvW5CYCqHN8ZlMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBAHZeLZ7yCvqQOJbQ3yoixYLVR33dSx/T/W5WQQGYcQ7TUZ4N +gXkV9kGD+9I/8NWgkttx4TTieWctyNPrhAqqWGuGvhCQ+WL8m67EPRiVdw7EY+61 +qlUbAdK39adDqbDeUI07dzd+wKlhwnHtd2dTcJEGluwLaU4ftuLA8DQNwzWxZVAW +EWzfTUgdc1SYTysE5C0d1Q9CbI+o0Na+CaW4DRqGh1OGyH7Fyck9WQp1nOAEQhD9 +sn4FOC4w+T92t/Ekpfcm5HHkYjGWK1EsCkRCj1m8QtyqBgByeXHCidH2pfKIuVdl +ZnaOfIkCQx49gLARjzzGp/OC/UfKVCWzpLHn7dY= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.key b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.key new file mode 100644 index 00000000000..418d3ed0621 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,67376A5606FB27E9 + +v4OAjurrB7Tc2mVswSeaaYAiFomvSQmre8DlC5VNvavzT6Hlx5hIyEVIttcNeTeD +Hj4d+JOp5OO5Ew5cWgo0jtR2QIjGbrQe8t8oedJwhEiYC0IfX0rItJv1iaz4WO+8 +hz4J1lwAI9wFabmXIeHx0q3ZqqIfSOoAepO8W2SqIj0KSz3tKRoYaX7AzZ27muLN +K2Mej1EX/ftgZZNgfU62gJzGGsdQecLc+UZBDVTPZL3PLZmQV0r1sBXaq56Qk78t +DsUyYwA4zvPBIPkfydTxobylt1pSeZ7Yyni+iQk4X7T4jj3Q6wKrwjPNJ6p8Xcwn +4BN37DIYPPBEp56EUCbxl+iMkfRoCjZdaqhycw4LjKB0wloY2Zko6FaYTd0qPZ/m +2GM8MvIQ9bc4t9Bef2VAXhb8IUXJ+ro+sB7vlQRSLQ1JwHPAPiIFyRmilezAaupA +2DNLBIlmgMzh5Lh6vIcyHQVxsCoJesmVQCyyBy4lFPU9afcYLWjzgnBhW2SikTpW +/lC3VDloUjIYfC3qYhbHIomsUMCGk3xHIwLw1cNFnf7c/RX1q5bBZrJ8q6GVh/Rb +ulHcuCm5g/Jvt8TM8c2WIE5mzwkoFIe/XVY33Lyk237qCsPlVWwFpxa0UtWVpDnk +uuubgI0cb+zehN2f5sgHtdbphNNTflZyW+Uk0lCbYGNakXBILePFmURsThW3gQ44 +g+zPaiGkbB1qwE/TS3Vz17j8DkgWRsEJP7IBsZ/ljaUcs3zujH6EKN9YtwyIeoHo +VHBuF4RGew2Ps0NoLGYanpvu01ZUUr2C0ZbDjXLBy8ajOc5zgyMCBead19T+piFw +iGvA8D7eILz1xzbAcX7dry06Mc9o/CbFcRMIis3LVvdSuZDoRk/cv0mKo6rq/1MS +VeYgPjJ8QWuhulIYkmNipTRdzMsXEafEdsp+GruKnNri0u/lirfhYAXDGp2GAttJ +zKnbPkHSJRt1xWgtimU+CnnpEOp+qd2yFNgT/Nn2yjrsPqLqTkEdzbh2DoCYGPHe +HoAcs+MePKfqBh+W2MEJ/ZdDVz93lKoDTuk2cjaVVe+7YBdHW0gQzfW5ArscadUV ++mSzhUm9AIhM/Gk6t7rgVoWyO6PvkTgENKFmUUQkHnJWaaDIzji2xFR114Huw5rN +gHPn8HOKPIhVu1UV2N/MFLrjjvn8bft/vLkSxZ3c7AgYkPr8Mmd0b8ufTOlk5a+W +hkR4D7WZ7Hgkj1NIvRbjxCXTHFbHZqKJHeTTNCpCUygIH5g8h7RGVPS0XKylpbr1 +2kZU/AwlPcAPba+UcTKXOvy02NmiV5Bg6qYc8rcxv6aXKPOrxeW3Iop/ZesF7Nnu +ccR+rI78cQIGD1gAo3xLJ10/p0Rb9R/pWfHUY499Oymc926qWaj3mEl+xOJXxWOr +3Uf4yMg8mrfcm3JW7clWy3l+/++CSWBS/zqUpXKy5CbVdR8XQNS5Pg0fDgwkrcbv +7TviQ+vYD7aEI0w6mviljPkYVTXNpnRHyF7VfaEYff8032GxW99D3zeK7dd6yP4k +W/oN5IwXCvnfrteNtqSOIPOWw9gAp4x4EzmCin77s8SgMHOGsPcEhA== +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.p12 b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.p12 new file mode 100644 index 0000000000000000000000000000000000000000..e2db32e6ddc6c72d880498b90ff013a7ba161e87 GIT binary patch literal 1130 zcmV-w1eN*I72$WOzc@hNe7A0TQVdp3JAHdg+%0fb zTOXL8HC=_-tt9^IyD%ZeZW>hDEWaSh#OJqCr(j++XMymifEyCb>+af_r%9zIDj47# zDs)&&SUeKiQtVZnN@O1>XwmXR5ozv)ifHztRSgQ4*`<9F{!<71IkINM3ySBh<94+-6|q)X5#R zyrraOKG;aTCw5nwCvI-Ns0Je{Y*;6dfXDomf`OLYy_ae0d&Qrf$O<+-|8|Gv%TEvo z!oNL~h7zL7GyuvleOb5H*gtIqiG6L}Ps^|y&4$`G%YR5#Cv___pW 
z!A*EAVIxynAXXkfi@+^|4*^}$08w8tO1k%XibDw=dR3e_g)cMmr?@T~S5rK`x#f%wP@?W|Tj~v&zE-UXfeC$3vWL)3BlaRFc zKsL|s%tjSpqkTxE{%<4)g*A}{EpZcRO-vl}-EqhiO|6xA)$W9w&6f4g>RG18UjVri zSk;0l!zz!g?l=?QXp&FtkF*U~tt`h_%Q%#&(>kcKHnwxP9|+Sn<_#8kfxlwHBo1{% z_!F}`72g9YmbCZi#t1qQSYUd34bE8S%gZ?(#3Wzj_bPUhRus;Tg|Nz&+jS`29T~6% zQ0pqoV4a=hxH|~V(Pr`HjSwFoQ+L3II+aT}-`;r-@RYDUSW9>}HIb{Bx_-r^E3s94 z8x6#4Dce{~j6?61)^dwXTiXXRw^9I1MA}~HMAutIB1uG5%0vZJX1Qg>! w+3XFqnR3Uul@d>-^q=QkBku$h+2FsgP^>d~P>Cin8NTb@)y9P`0s{etpq_jdPXGV_ literal 0 HcmV?d00001 diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.crt b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.crt new file mode 100644 index 00000000000..cd0dcb680c2 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDszCCApugAwIBAgIVAJX8GTm+AWIicokE5npzZ2B3qad3MA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTE5MDMyOTA1MTIyNVoXDTI4MTAyNzA1MTIyNVowDzENMAsG +A1UEAxMEaHR0cDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMGvsPmg +4lKfd1ie6TZQLdCxfXy6MooLHac1wUxyvHcUxlbuSchj+A2gVPBk6VaCV8OO4X7T +MslTJKw5877m28Xzw+CmUgDsXAJJy2IvM8X0IP/xktkJQ3uSUReSW2650TFj9Zcm +Z3AtMblo+cNnZMNWJBW1G1QMHHKMY5kukaB7Ia6CBec60k2HrkS6xmsMgwQPBa/k +VlbHkI7RzbmxohVJFHL34EFhifEL0qkYU5MnZ8PjH8U749VoZOYcY1MKb2sw9iYn +JTOv1gIFhd4Sw37occxDVaqZU/1X90ijZyvB/AugxRfmpLb83ZRMdVeQTiiXqMkg +1g94h7hgPpLA9AkCAwEAAaOB4DCB3TAdBgNVHQ4EFgQUc/bPDUIvgLwg9xwf9CxP +ec84o1YwHwYDVR0jBBgwFoAUv4ZtbM/ec9/H46q9bkJgKoc3xmUwgY8GA1UdEQSB +hzCBhIIJbG9jYWxob3N0ghdsb2NhbGhvc3Q2LmxvY2FsZG9tYWluNocEfwAAAYcQ +AAAAAAAAAAAAAAAAAAAAAYIKbG9jYWxob3N0NIIKbG9jYWxob3N0NoIVbG9jYWxo +b3N0LmxvY2FsZG9tYWlughdsb2NhbGhvc3Q0LmxvY2FsZG9tYWluNDAJBgNVHRME +AjAAMA0GCSqGSIb3DQEBCwUAA4IBAQAJW7WWQkuNjDlQQ5H6bhMr2LhbC9TZWgFK +zWsIWuhd1QxiWbTp/Yegcbqs3hZ9MQtxU4egml/sMAdZSF3Kg3NeYtrHDj//oKYo +VSfTPNjQLG1/ckCM0RDfFYOV+Sb3ktau5QZGL+5ZDfcfPLSHCSHeP0tft2R03Hp4 +pOX8/xAVmv0hxE74X5qodQyNFdDa6rtRZESLzY1b+oaEhKM49MZCNZL9TvvNUkWC +hXdaVehqBVJkrlsnli6oqPBjpKNP2YkRG3eqy/Qd/sg6rwJqu/B0KBI8QBDkokSY +YORRviEmSe0+hmcBCTYZWN8WX3BrEPuGdBJXWi5G8GPGFg4rrOUE +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.key b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.key new file mode 100644 index 00000000000..3b7571db543 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,A46C453D20DC86A7 + +eFBKmjJUmailcnfc1+a6lwR8G7sk4ff1De5hIYY8iNkpP6XVxZ/LrXttVF1x1SWy +YaUJL35Optzy4W+LglJgAdNo9XGaCsHuSi3z7aqYNdihSldKxDw3iIJEEuB63Lv7 +eu4pEYdOlRElEs71cmjMCSmg1pfeDRruShB9RUKy3Iw8tM6tV+t+vIaiVftb3i9O +AaTEUgAJqQjcISWy5JAxRwEwVDAhHe23vbVomxXlJKuTroezPFt5SxXQmdfNmP7B +D8iZR/Uf+7XdCFKC/7n6enYZfg5/IoaOO9sPG4bueFKmLAdXpmN1hKvJwIG1qKQT +Fz7x8FGi0S11BHDZMs5kJHBaiuXmq02mozb5XOFllQYl8+fsa4lscIFeQ/YbAjVo +g5nEVbqRUCSLy6F6JSX6SJB4ng/JMHzKLfhAUSpvotBxZbJ4IpNu06oCKjggiIoR +9z2YE6gR1pBJSyCDS8fJXtyLWN/WBdbvf1fw3t7utPFT606TYFOvt2KrSndcrTwb +EByWHJufxv8J+anrnnNM11RMTqhpi4MeXsaaA7jUCzh5QzxnT8imOyNDF8OVxEKk +Y9W9ToUchHojIJZGJhB2I1ndCUQaJF+OhLrjy2Zk/Imx3wBf3huyWAA8GNVQ04DD +mhDxWdZ30lJgxJH4xgk4l3nWBNAQ+X04lIyRi83tD/E9plX3EX2sWzBBHCSybh0C +bNHAQVMVaxEMTcCumk/USiuRcm4BL0495o4/debn9EExs95dw6pAhJoHZ8kc71GP +YOYNuQvz0Ljbu4ZO1/OgmNDtFuNV83GlDa6yUme/Di0SqmLzxUwPJIZ9I2dNtgLf +2emoUA9PSUl02Hcm5WN7AtmL/Pxz1joR/gKeNAII97PS9WFdqRS0ypwiiwp15mBU 
+LilEGB4V3laVJFw6sLFwPjWUYZCEhzSdAMnHfxrIZuhpfSi2W39w8Frqwx0JOUoX +HmogsyM/xqn9VelVNbWUP06IwJkcocWM1rzv3nkZOsKb5EhGOk1qrA/BKyajcazX +49x4wpIpJoz4tgStrlgxGZ0DeMT8PIrZGbZDhQ78MxnQe376CiXIOKtrZVOp6uoo +uDtYg9OiZZ2GDoSIgjAStpYbF4rkJI+3kyhR4oD8KfsC/rTG16hNCRnTIIiUECyU +1jWBLmqYWuMTiekb4asB6cWlQYwUUtSBt6ySB+zU+Cl0Wi3u+kXrsMthFnJE0GWB +EOCmHsvMqD+u0uArpJHpE0o9L3ePEkiDssU2MJdOLpb0AKW/uqAA/14a4JAr/y9Z +v+pUPDbjeoIXRNqzXkWEdHKZOnEGAE5QBLzScJqWU0YY7WP1+xpyoYapM37v9V/J +viNJW+gxvW9yZdxKzGm9P/UIjtndx2QnAa7mPgXOej/AMqpl+IkIJmvi13IEQTH2 +NuBghACrRp7YuffEroEs3P7fgCoiMHvabCiXkLhWoZqgVuiy72GuSwKEPK8bF30U +8u7lencUvnIRU9jL0kDaQL0kESw0f3dgE+ltQbgew5/rmqMgKpmDDoouLJf95wi2 +rvPGRb4QXpBO8V4/8VMPPJKT55ZDygjN45z1gwCZ2tbYtnKUOH82drx1TB2bvrso +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.crt b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.crt new file mode 100644 index 00000000000..93121ed8b15 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIUe2Oa37SVQ5G1SpWiRS+abpjuNPMwDQYJKoZIhvcNAQEL +BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l +cmF0ZWQgQ0EwHhcNMTkwMzI5MDUxMjM1WhcNMjgxMDI3MDUxMjM1WjAUMRIwEAYD +VQQDEwl0cmFuc3BvcnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCN +v6vW4Bwj0eku+Ivm6d+HQwzfLqAdnM8tHAgC4qMDk7a/X5ckTesTk2VOmX775zkT +SJex5uGuEuyTgZVEXQhkpZUXURGhnQ8/exxg2m3cwTin+o1XN5xCo6FUfU2IqQrf +1Xd7RKfXv/YCUlS2xzQVnFRYAYpMMzTtUloc37PWz7TYA/ei7p06BCKLGR785ipF +MWq0S+QVmldOlp1vhZrD+KpgxFdo0Gd+e0loLO6321sXBEksy4K/5FaknDT9Fc/f +NUVmLaiRPi2nW6nIBjYyoVhIPztkVdxfj7jNdJCvshnEY29Hhd7ra9njLbyxzK2d +ACpyf54TCNU0j5qRcqe7AgMBAAGjgeAwgd0wHQYDVR0OBBYEFDSaYLY3KEm7L3jF +iW7CwCdoqcZjMB8GA1UdIwQYMBaAFL+GbWzP3nPfx+OqvW5CYCqHN8ZlMIGPBgNV +HREEgYcwgYSCCWxvY2FsaG9zdIIXbG9jYWxob3N0Ni5sb2NhbGRvbWFpbjaHBH8A +AAGHEAAAAAAAAAAAAAAAAAAAAAGCCmxvY2FsaG9zdDSCCmxvY2FsaG9zdDaCFWxv +Y2FsaG9zdC5sb2NhbGRvbWFpboIXbG9jYWxob3N0NC5sb2NhbGRvbWFpbjQwCQYD +VR0TBAIwADANBgkqhkiG9w0BAQsFAAOCAQEAa3T5oaPucZRx5JFxqkSTaIpcptvw +iiZLpaEooX0QVMy+PkmnzNh/xaN5qWWzKFV4ihSURtgH7gbPjBF7/pTqqO8Ekshp +36I6WTuhvps4nR4iCKaMFfyCBDKBvtTIySxE2kZJlyvgAqdB3bww79FfZt+ftxEt +E1m5nFDWCxaATY0foYpRUAJTPfmnFWDZfP4ZglSWmNSfQAdsQfwMlu09jXWXw7Yx +Cd39f9KW1aQT4RstHNWuQwgskv0vuTo2r0r+1YWTNCFQVuA8OD620CmJs85zGOnj +5L0YyLK1KvvuARfjr/skpze7F1Leir9+NiaJjXA+xfnkoGniJ2AUvPC8xg== +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.key b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.key new file mode 100644 index 00000000000..eace4a20859 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,EAC448D0A9AC0BE9 + +OnQAA8FLp6KDtp+AivEZB+TmTgAZ7oExMFLPL4o64i5onxLlJ15jG4MJ/YEyRZRa +T+KJLfO5BSW7EhgPQrR6UQ2yQtKGEwqq0nboIqEnEnpJQzzas2/d9rGIQFd4+519 +GNzstFVz8bn2+Z6xN0YS8/lVPUF6sxbt1DGy/BlSpUze63WNw5vxO4zmOu+8lzvT +ZkK8VGbeqv9j0YF/57NeTQ+b473f1cyGexpv7wsJD+62cmie9Z0iNoqkrV3yjNBk +gqoxWe8I42rKsNJiL/H0tyLMfICaVJs2urQjs9GHJGS/uy+MlzJCaEG5LlcTCXq3 +0d0e+OCWzHzzcQiLlzg0W/iCbMEpMvZcWlTaATRLcY96QKHMku9xaPLuO5BvL/XF +HLP0xHsc3no0HqX9/BRZFNdtc+7u3An46UEDmyjNZRkDSmhC/vVa6/+5pnp2eU2N +b88/cTmGYDdGoImcp9nIhBnyMqNmSeuho3g+w5oa03HyjlEQ5MS5VXHOnzzbH8lr +fTxVx/PPb3Ui8bs2X93JNm6atL8Yn75QkyX7iYypuzzhgq3wKETHpV4VJ2XtfbK8 +HAvMIc+IOWDA3ZYqIgkA8yn3RzVB+mTf/px0aWR53Ie90uLXsF8y2F7nuScnVDqG 
+9ul03RSPfeO+bUnyl5JsPnRN/0i/Ge1/SvX+j4L3ir65NEvrC2BPEfzTFXh6KMs4 +VF1USmWPAgfg7FjJjUvi/7/2+YOswFTuMdun9plV3heJ9AyCyYrPJuP7iXeF+L1Z +nsGfD4ZaZJ81zXW1VqTTSBdyreK+t9YjGVL7hEUhv6k/SOlyhcvaYubB7f3aTegU +IN+2T3hFCnBNgvqHKtAJ1FBgzatavJOk4Oo0aDKThwCrxp9MdxPRBOMrBnRHsdtN +6/u7hHObFNIIBoxdSMMdF4NZXkYSMYCM2dq+FvEzDCJ6krHxq1W71j109F+Ow0B7 +Je4jXboH3rrvnh9HtowYWFufB6GPTCmV822iC1u6DGwNTLPunMMLhASSENNR3Lk0 +xtfVAjcKA8/Xo7Is62OOa2ud2Z4Zjl2OdANZ7lgZScprfiHI6LrHAw9tPGcn9xJ2 +8dtQILCSkoHKRWlR41e9Xx+jRhOXl3GKWqFKAtH3jGQu5kH+IgN5IeUIerbKe0Yn +vk+2QqLQssnQPkQDGketuSMx/+vCbvQscmfA+bfNB+UIbwsjmyQk5W+mxz3pncvG +KCat5pCspfdj0oVHl+WEoAR3raXFwcZAWm57HtCm3kfPTSImDLT6c+sCriY48vDN +YhC3DtzKwVfmw44q/hs0QzgWmt6p22ZwNMTnVxvQSJeFfLV/nEwxtmM/WFXoyDqF +UoR/T2p+ngRyysCtmYhf6Qnq2J6CZum7MUvIVtSL7c+eazXbVTTHbLFNrcX/Zitl +Bf03Rz7ZJGSlqczdhi5gTSIC4dD9hLWbQlw3OcH45UiGw5tcBaAd86FxarPqE2/Z +NQSp88Q9peJfTxcY9QyQhDDUqyfMDoNMRRVfEMP5qNicH3Y5jkKiCJwGbqIC238/ +38wcJnIrkwMk2tttgq1Lr1QfWplOHxe51zJ7zXjnigMkt/AodqjjNQ== +-----END RSA PRIVATE KEY----- From c29027d99e969d464fcb461921ae5b0de6f7d086 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 8 Apr 2019 07:08:36 +0200 Subject: [PATCH 23/45] Change HLRC CCR response tests to use AbstractResponseTestCase base class. (#40257) This way the response classes are tested in a more realistic setting. Relates to #39745 --- client/rest-high-level/build.gradle | 3 + .../client/ccr/CcrStatsResponseTests.java | 411 ++++++------------ .../client/ccr/FollowInfoResponseTests.java | 118 +++-- .../client/ccr/FollowStatsResponseTests.java | 299 ++++--------- .../GetAutoFollowPatternResponseTests.java | 128 +++--- .../client/ccr/PutFollowResponseTests.java | 37 +- 6 files changed, 374 insertions(+), 622 deletions(-) diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 44262f09346..420bd6d7414 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -64,6 +64,9 @@ dependencies { testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" //this is needed to make RestHighLevelClientTests#testApiNamingConventions work from IDEs testCompile "org.elasticsearch:rest-api-spec:${version}" + // Needed for serialization tests: + // (In order to serialize a server side class to a client side class or the other way around) + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" restSpec "org.elasticsearch:rest-api-spec:${version}" } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java index cb8072f6baf..d56b762520c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java @@ -20,50 +20,110 @@ package org.elasticsearch.client.ccr; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.client.ccr.AutoFollowStats.AutoFollowedCluster; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESTestCase; 
+import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; -import java.util.concurrent.TimeUnit; -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class CcrStatsResponseTests extends ESTestCase { +public class CcrStatsResponseTests extends AbstractResponseTestCase { - public void testFromXContent() throws IOException { - xContentTester(this::createParser, - CcrStatsResponseTests::createTestInstance, - CcrStatsResponseTests::toXContent, - CcrStatsResponse::fromXContent) - .supportsUnknownFields(true) - .assertEqualsConsumer(CcrStatsResponseTests::assertEqualInstances) - .assertToXContentEquivalence(false) - .test(); + @Override + protected CcrStatsAction.Response createServerTestInstance() { + org.elasticsearch.xpack.core.ccr.AutoFollowStats autoFollowStats = new org.elasticsearch.xpack.core.ccr.AutoFollowStats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomReadExceptions(), + randomTrackingClusters() + ); + FollowStatsAction.StatsResponses statsResponse = createStatsResponse(); + return new CcrStatsAction.Response(autoFollowStats, statsResponse); } - // Needed, because exceptions in IndicesFollowStats and AutoFollowStats cannot be compared - private static void assertEqualInstances(CcrStatsResponse expectedInstance, CcrStatsResponse newInstance) { - assertNotSame(expectedInstance, newInstance); + static NavigableMap> randomReadExceptions() { + final int count = randomIntBetween(0, 16); + final NavigableMap> readExceptions = new TreeMap<>(); + for (int i = 0; i < count; i++) { + readExceptions.put("" + i, Tuple.tuple(randomNonNegativeLong(), + new ElasticsearchException(new IllegalStateException("index [" + i + "]")))); + } + return readExceptions; + } + static NavigableMap randomTrackingClusters() { + final int count = randomIntBetween(0, 16); + final NavigableMap readExceptions = new TreeMap<>(); + for (int i = 0; i < count; i++) { + readExceptions.put("" + i, + new org.elasticsearch.xpack.core.ccr.AutoFollowStats.AutoFollowedCluster(randomLong(), randomNonNegativeLong())); + } + return readExceptions; + } + + static FollowStatsAction.StatsResponses createStatsResponse() { + int numResponses = randomIntBetween(0, 8); + List responses = new ArrayList<>(numResponses); + for (int i = 0; i < numResponses; i++) { + ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus( + randomAlphaOfLength(4), + randomAlphaOfLength(4), + randomAlphaOfLength(4), + randomInt(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + 
randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + Collections.emptyNavigableMap(), + randomLong(), + randomBoolean() ? new ElasticsearchException("fatal error") : null); + responses.add(new FollowStatsAction.StatsResponse(status)); + } + return new FollowStatsAction.StatsResponses(Collections.emptyList(), Collections.emptyList(), responses); + } + + @Override + protected CcrStatsResponse doParseToClientInstance(XContentParser parser) throws IOException { + return CcrStatsResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(CcrStatsAction.Response serverTestInstance, CcrStatsResponse clientInstance) { { - AutoFollowStats newAutoFollowStats = newInstance.getAutoFollowStats(); - AutoFollowStats expectedAutoFollowStats = expectedInstance.getAutoFollowStats(); + AutoFollowStats newAutoFollowStats = clientInstance.getAutoFollowStats(); + org.elasticsearch.xpack.core.ccr.AutoFollowStats expectedAutoFollowStats = serverTestInstance.getAutoFollowStats(); assertThat(newAutoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(expectedAutoFollowStats.getNumberOfSuccessfulFollowIndices())); assertThat(newAutoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), @@ -89,62 +149,69 @@ public class CcrStatsResponseTests extends ESTestCase { } } { - IndicesFollowStats newIndicesFollowStats = newInstance.getIndicesFollowStats(); - IndicesFollowStats expectedIndicesFollowStats = expectedInstance.getIndicesFollowStats(); + IndicesFollowStats newIndicesFollowStats = clientInstance.getIndicesFollowStats(); + + // sort by index name, then shard ID + final Map> expectedIndicesFollowStats = new TreeMap<>(); + for (final FollowStatsAction.StatsResponse statsResponse : serverTestInstance.getFollowStats().getStatsResponses()) { + expectedIndicesFollowStats.computeIfAbsent( + statsResponse.status().followerIndex(), + k -> new TreeMap<>()).put(statsResponse.status().getShardId(), statsResponse); + } assertThat(newIndicesFollowStats.getShardFollowStats().size(), - equalTo(expectedIndicesFollowStats.getShardFollowStats().size())); + equalTo(expectedIndicesFollowStats.size())); assertThat(newIndicesFollowStats.getShardFollowStats().keySet(), - equalTo(expectedIndicesFollowStats.getShardFollowStats().keySet())); + equalTo(expectedIndicesFollowStats.keySet())); for (Map.Entry> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) { List newStats = indexEntry.getValue(); - List expectedStats = expectedIndicesFollowStats.getShardFollowStats(indexEntry.getKey()); + Map expectedStats = expectedIndicesFollowStats.get(indexEntry.getKey()); assertThat(newStats.size(), equalTo(expectedStats.size())); for (int i = 0; i < newStats.size(); i++) { ShardFollowStats actualShardFollowStats = newStats.get(i); - ShardFollowStats expectedShardFollowStats = expectedStats.get(i); + ShardFollowNodeTaskStatus expectedShardFollowStats = expectedStats.get(actualShardFollowStats.getShardId()).status(); assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster())); - assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.getLeaderIndex())); - assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.getFollowerIndex())); + assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.leaderIndex())); + assertThat(actualShardFollowStats.getFollowerIndex(), 
equalTo(expectedShardFollowStats.followerIndex())); assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId())); assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(), - equalTo(expectedShardFollowStats.getLeaderGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.getLeaderMaxSeqNo())); + equalTo(expectedShardFollowStats.leaderGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.leaderMaxSeqNo())); assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(), - equalTo(expectedShardFollowStats.getFollowerGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.getLastRequestedSeqNo())); + equalTo(expectedShardFollowStats.followerGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.lastRequestedSeqNo())); assertThat(actualShardFollowStats.getOutstandingReadRequests(), - equalTo(expectedShardFollowStats.getOutstandingReadRequests())); + equalTo(expectedShardFollowStats.outstandingReadRequests())); assertThat(actualShardFollowStats.getOutstandingWriteRequests(), - equalTo(expectedShardFollowStats.getOutstandingWriteRequests())); + equalTo(expectedShardFollowStats.outstandingWriteRequests())); assertThat(actualShardFollowStats.getWriteBufferOperationCount(), - equalTo(expectedShardFollowStats.getWriteBufferOperationCount())); + equalTo(expectedShardFollowStats.writeBufferOperationCount())); assertThat(actualShardFollowStats.getFollowerMappingVersion(), - equalTo(expectedShardFollowStats.getFollowerMappingVersion())); + equalTo(expectedShardFollowStats.followerMappingVersion())); assertThat(actualShardFollowStats.getFollowerSettingsVersion(), - equalTo(expectedShardFollowStats.getFollowerSettingsVersion())); + equalTo(expectedShardFollowStats.followerSettingsVersion())); assertThat(actualShardFollowStats.getTotalReadTimeMillis(), - equalTo(expectedShardFollowStats.getTotalReadTimeMillis())); + equalTo(expectedShardFollowStats.totalReadTimeMillis())); assertThat(actualShardFollowStats.getSuccessfulReadRequests(), - equalTo(expectedShardFollowStats.getSuccessfulReadRequests())); - assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.getFailedReadRequests())); - assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.getOperationsReads())); - assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.getBytesRead())); + equalTo(expectedShardFollowStats.successfulReadRequests())); + assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.failedReadRequests())); + assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.operationsReads())); + assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.bytesRead())); assertThat(actualShardFollowStats.getTotalWriteTimeMillis(), - equalTo(expectedShardFollowStats.getTotalWriteTimeMillis())); + equalTo(expectedShardFollowStats.totalWriteTimeMillis())); assertThat(actualShardFollowStats.getSuccessfulWriteRequests(), - equalTo(expectedShardFollowStats.getSuccessfulWriteRequests())); + equalTo(expectedShardFollowStats.successfulWriteRequests())); assertThat(actualShardFollowStats.getFailedWriteRequests(), - equalTo(expectedShardFollowStats.getFailedWriteRequests())); - 
assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.getOperationWritten())); + equalTo(expectedShardFollowStats.failedWriteRequests())); + assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.operationWritten())); assertThat(actualShardFollowStats.getReadExceptions().size(), - equalTo(expectedShardFollowStats.getReadExceptions().size())); + equalTo(expectedShardFollowStats.readExceptions().size())); assertThat(actualShardFollowStats.getReadExceptions().keySet(), - equalTo(expectedShardFollowStats.getReadExceptions().keySet())); + equalTo(expectedShardFollowStats.readExceptions().keySet())); for (final Map.Entry> entry : actualShardFollowStats.getReadExceptions().entrySet()) { final Tuple expectedTuple = - expectedShardFollowStats.getReadExceptions().get(entry.getKey()); + expectedShardFollowStats.readExceptions().get(entry.getKey()); assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1())); // x-content loses the exception final ElasticsearchException expected = expectedTuple.v2(); @@ -156,246 +223,10 @@ public class CcrStatsResponseTests extends ESTestCase { assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage())); } assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(), - equalTo(expectedShardFollowStats.getTimeSinceLastReadMillis())); + equalTo(expectedShardFollowStats.timeSinceLastReadMillis())); } } } } - private static void toXContent(CcrStatsResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - { - AutoFollowStats autoFollowStats = response.getAutoFollowStats(); - builder.startObject(CcrStatsResponse.AUTO_FOLLOW_STATS_FIELD.getPreferredName()); - { - builder.field(AutoFollowStats.NUMBER_OF_SUCCESSFUL_INDICES_AUTO_FOLLOWED.getPreferredName(), - autoFollowStats.getNumberOfSuccessfulFollowIndices()); - builder.field(AutoFollowStats.NUMBER_OF_FAILED_REMOTE_CLUSTER_STATE_REQUESTS.getPreferredName(), - autoFollowStats.getNumberOfFailedRemoteClusterStateRequests()); - builder.field(AutoFollowStats.NUMBER_OF_FAILED_INDICES_AUTO_FOLLOWED.getPreferredName(), - autoFollowStats.getNumberOfFailedFollowIndices()); - builder.startArray(AutoFollowStats.RECENT_AUTO_FOLLOW_ERRORS.getPreferredName()); - for (Map.Entry> entry : - autoFollowStats.getRecentAutoFollowErrors().entrySet()) { - builder.startObject(); - { - builder.field(AutoFollowStats.LEADER_INDEX.getPreferredName(), entry.getKey()); - builder.field(AutoFollowStats.TIMESTAMP.getPreferredName(), entry.getValue().v1()); - builder.field(AutoFollowStats.AUTO_FOLLOW_EXCEPTION.getPreferredName()); - builder.startObject(); - { - ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, entry.getValue().v2()); - } - builder.endObject(); - } - builder.endObject(); - } - builder.endArray(); - builder.startArray(AutoFollowStats.AUTO_FOLLOWED_CLUSTERS.getPreferredName()); - for (Map.Entry entry : autoFollowStats.getAutoFollowedClusters().entrySet()) { - builder.startObject(); - { - builder.field(AutoFollowStats.CLUSTER_NAME.getPreferredName(), entry.getKey()); - builder.field(AutoFollowStats.TIME_SINCE_LAST_CHECK_MILLIS.getPreferredName(), - entry.getValue().getTimeSinceLastCheckMillis()); - builder.field(AutoFollowStats.LAST_SEEN_METADATA_VERSION.getPreferredName(), - entry.getValue().getLastSeenMetadataVersion()); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); - - IndicesFollowStats indicesFollowStats = 
response.getIndicesFollowStats(); - builder.startObject(CcrStatsResponse.FOLLOW_STATS_FIELD.getPreferredName()); - { - builder.startArray(IndicesFollowStats.INDICES_FIELD.getPreferredName()); - for (Map.Entry> indexEntry : - indicesFollowStats.getShardFollowStats().entrySet()) { - builder.startObject(); - { - builder.field(IndicesFollowStats.INDEX_FIELD.getPreferredName(), indexEntry.getKey()); - builder.startArray(IndicesFollowStats.SHARDS_FIELD.getPreferredName()); - { - for (ShardFollowStats stats : indexEntry.getValue()) { - builder.startObject(); - { - builder.field(ShardFollowStats.LEADER_CLUSTER.getPreferredName(), stats.getRemoteCluster()); - builder.field(ShardFollowStats.LEADER_INDEX.getPreferredName(), stats.getLeaderIndex()); - builder.field(ShardFollowStats.FOLLOWER_INDEX.getPreferredName(), stats.getFollowerIndex()); - builder.field(ShardFollowStats.SHARD_ID.getPreferredName(), stats.getShardId()); - builder.field(ShardFollowStats.LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), - stats.getLeaderGlobalCheckpoint()); - builder.field(ShardFollowStats.LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), stats.getLeaderMaxSeqNo()); - builder.field(ShardFollowStats.FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), - stats.getFollowerGlobalCheckpoint()); - builder.field(ShardFollowStats.FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), - stats.getFollowerMaxSeqNo()); - builder.field(ShardFollowStats.LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), - stats.getLastRequestedSeqNo()); - builder.field(ShardFollowStats.OUTSTANDING_READ_REQUESTS.getPreferredName(), - stats.getOutstandingReadRequests()); - builder.field(ShardFollowStats.OUTSTANDING_WRITE_REQUESTS.getPreferredName(), - stats.getOutstandingWriteRequests()); - builder.field(ShardFollowStats.WRITE_BUFFER_OPERATION_COUNT_FIELD.getPreferredName(), - stats.getWriteBufferOperationCount()); - builder.humanReadableField( - ShardFollowStats.WRITE_BUFFER_SIZE_IN_BYTES_FIELD.getPreferredName(), - "write_buffer_size", - new ByteSizeValue(stats.getWriteBufferSizeInBytes())); - builder.field(ShardFollowStats.FOLLOWER_MAPPING_VERSION_FIELD.getPreferredName(), - stats.getFollowerMappingVersion()); - builder.field(ShardFollowStats.FOLLOWER_SETTINGS_VERSION_FIELD.getPreferredName(), - stats.getFollowerSettingsVersion()); - builder.humanReadableField( - ShardFollowStats.TOTAL_READ_TIME_MILLIS_FIELD.getPreferredName(), - "total_read_time", - new TimeValue(stats.getTotalReadTimeMillis(), TimeUnit.MILLISECONDS)); - builder.humanReadableField( - ShardFollowStats.TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD.getPreferredName(), - "total_read_remote_exec_time", - new TimeValue(stats.getTotalReadRemoteExecTimeMillis(), TimeUnit.MILLISECONDS)); - builder.field(ShardFollowStats.SUCCESSFUL_READ_REQUESTS_FIELD.getPreferredName(), - stats.getSuccessfulReadRequests()); - builder.field(ShardFollowStats.FAILED_READ_REQUESTS_FIELD.getPreferredName(), - stats.getFailedReadRequests()); - builder.field(ShardFollowStats.OPERATIONS_READ_FIELD.getPreferredName(), stats.getOperationsReads()); - builder.humanReadableField( - ShardFollowStats.BYTES_READ.getPreferredName(), - "total_read", - new ByteSizeValue(stats.getBytesRead(), ByteSizeUnit.BYTES)); - builder.humanReadableField( - ShardFollowStats.TOTAL_WRITE_TIME_MILLIS_FIELD.getPreferredName(), - "total_write_time", - new TimeValue(stats.getTotalWriteTimeMillis(), TimeUnit.MILLISECONDS)); - builder.field(ShardFollowStats.SUCCESSFUL_WRITE_REQUESTS_FIELD.getPreferredName(), - stats.getSuccessfulWriteRequests()); - 
builder.field(ShardFollowStats.FAILED_WRITE_REQUEST_FIELD.getPreferredName(), - stats.getFailedWriteRequests()); - builder.field(ShardFollowStats.OPERATIONS_WRITTEN.getPreferredName(), stats.getOperationWritten()); - builder.startArray(ShardFollowStats.READ_EXCEPTIONS.getPreferredName()); - { - for (final Map.Entry> entry : - stats.getReadExceptions().entrySet()) { - builder.startObject(); - { - builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), - entry.getKey()); - builder.field(ShardFollowStats.READ_EXCEPTIONS_RETRIES.getPreferredName(), - entry.getValue().v1()); - builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); - builder.startObject(); - { - ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, - entry.getValue().v2()); - } - builder.endObject(); - } - builder.endObject(); - } - } - builder.endArray(); - builder.humanReadableField( - ShardFollowStats.TIME_SINCE_LAST_READ_MILLIS_FIELD.getPreferredName(), - "time_since_last_read", - new TimeValue(stats.getTimeSinceLastReadMillis(), TimeUnit.MILLISECONDS)); - if (stats.getFatalException() != null) { - builder.field(ShardFollowStats.FATAL_EXCEPTION.getPreferredName()); - builder.startObject(); - { - ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, - stats.getFatalException()); - } - builder.endObject(); - } - } - builder.endObject(); - } - } - builder.endArray(); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); - } - builder.endObject(); - } - - private static CcrStatsResponse createTestInstance() { - return new CcrStatsResponse(randomAutoFollowStats(), randomIndicesFollowStats()); - } - - private static AutoFollowStats randomAutoFollowStats() { - final int count = randomIntBetween(0, 16); - final NavigableMap> readExceptions = new TreeMap<>(); - for (int i = 0; i < count; i++) { - readExceptions.put("" + i, Tuple.tuple(randomNonNegativeLong(), - new ElasticsearchException(new IllegalStateException("index [" + i + "]")))); - } - final NavigableMap autoFollowClusters = new TreeMap<>(); - for (int i = 0; i < count; i++) { - autoFollowClusters.put("" + i, new AutoFollowedCluster(randomLong(), randomNonNegativeLong())); - } - return new AutoFollowStats( - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - readExceptions, - autoFollowClusters - ); - } - - static IndicesFollowStats randomIndicesFollowStats() { - int numIndices = randomIntBetween(0, 16); - NavigableMap> shardFollowStats = new TreeMap<>(); - for (int i = 0; i < numIndices; i++) { - String index = randomAlphaOfLength(4); - int numShards = randomIntBetween(0, 5); - List stats = new ArrayList<>(numShards); - shardFollowStats.put(index, stats); - for (int j = 0; j < numShards; j++) { - final int count = randomIntBetween(0, 16); - final NavigableMap> readExceptions = new TreeMap<>(); - for (long k = 0; k < count; k++) { - readExceptions.put(k, new Tuple<>(randomIntBetween(0, Integer.MAX_VALUE), - new ElasticsearchException(new IllegalStateException("index [" + k + "]")))); - } - - stats.add(new ShardFollowStats( - randomAlphaOfLength(4), - randomAlphaOfLength(4), - randomAlphaOfLength(4), - randomInt(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomIntBetween(0, Integer.MAX_VALUE), - randomIntBetween(0, Integer.MAX_VALUE), - randomIntBetween(0, Integer.MAX_VALUE), - randomNonNegativeLong(), - 
randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomLong(), - readExceptions, - randomBoolean() ? new ElasticsearchException("fatal error") : null)); - } - } - return new IndicesFollowStats(shardFollowStats); - } - } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java index 5cd327495dc..2c5bfba5025 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java @@ -19,59 +19,89 @@ package org.elasticsearch.client.ccr; -import org.elasticsearch.client.ccr.FollowInfoResponse.FollowerInfo; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction; +import org.elasticsearch.xpack.core.ccr.action.FollowParameters; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; -public class FollowInfoResponseTests extends ESTestCase { +public class FollowInfoResponseTests extends AbstractResponseTestCase { - public void testFromXContent() throws IOException { - xContentTester(this::createParser, - FollowInfoResponseTests::createTestInstance, - FollowInfoResponseTests::toXContent, - FollowInfoResponse::fromXContent) - .supportsUnknownFields(true) - .test(); - } - - private static void toXContent(FollowInfoResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - builder.startArray(FollowInfoResponse.FOLLOWER_INDICES_FIELD.getPreferredName()); - for (FollowerInfo info : response.getInfos()) { - builder.startObject(); - builder.field(FollowerInfo.FOLLOWER_INDEX_FIELD.getPreferredName(), info.getFollowerIndex()); - builder.field(FollowerInfo.REMOTE_CLUSTER_FIELD.getPreferredName(), info.getRemoteCluster()); - builder.field(FollowerInfo.LEADER_INDEX_FIELD.getPreferredName(), info.getLeaderIndex()); - builder.field(FollowerInfo.STATUS_FIELD.getPreferredName(), info.getStatus().getName()); - if (info.getParameters() != null) { - builder.startObject(FollowerInfo.PARAMETERS_FIELD.getPreferredName()); - { - info.getParameters().toXContentFragment(builder, ToXContent.EMPTY_PARAMS); - } - builder.endObject(); - } - builder.endObject(); - } - builder.endArray(); - builder.endObject(); - } - - private static FollowInfoResponse createTestInstance() { - int numInfos = randomIntBetween(0, 64); - List infos = new ArrayList<>(numInfos); + @Override + protected FollowInfoAction.Response createServerTestInstance() { + int numInfos = randomIntBetween(0, 32); + List infos = new ArrayList<>(numInfos); for (int i = 0; i < numInfos; i++) 
{ - FollowInfoResponse.Status status = randomFrom(FollowInfoResponse.Status.values()); - FollowConfig followConfig = randomBoolean() ? FollowConfigTests.createTestInstance() : null; - infos.add(new FollowerInfo(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), status, followConfig)); + FollowParameters followParameters = null; + if (randomBoolean()) { + followParameters = randomFollowParameters(); + } + + infos.add(new FollowInfoAction.Response.FollowerInfo(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), + randomFrom(FollowInfoAction.Response.Status.values()), followParameters)); + } + return new FollowInfoAction.Response(infos); + } + + static FollowParameters randomFollowParameters() { + FollowParameters followParameters = new FollowParameters(); + followParameters.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong())); + followParameters.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + followParameters.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); + followParameters.setMaxRetryDelay(new TimeValue(randomNonNegativeLong())); + followParameters.setReadPollTimeout(new TimeValue(randomNonNegativeLong())); + return followParameters; + } + + @Override + protected FollowInfoResponse doParseToClientInstance(XContentParser parser) throws IOException { + return FollowInfoResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(FollowInfoAction.Response serverTestInstance, FollowInfoResponse clientInstance) { + assertThat(serverTestInstance.getFollowInfos().size(), equalTo(clientInstance.getInfos().size())); + for (int i = 0; i < serverTestInstance.getFollowInfos().size(); i++) { + FollowInfoAction.Response.FollowerInfo serverFollowInfo = serverTestInstance.getFollowInfos().get(i); + FollowInfoResponse.FollowerInfo clientFollowerInfo = clientInstance.getInfos().get(i); + + assertThat(serverFollowInfo.getRemoteCluster(), equalTo(clientFollowerInfo.getRemoteCluster())); + assertThat(serverFollowInfo.getLeaderIndex(), equalTo(clientFollowerInfo.getLeaderIndex())); + assertThat(serverFollowInfo.getFollowerIndex(), equalTo(clientFollowerInfo.getFollowerIndex())); + assertThat(serverFollowInfo.getStatus().toString().toLowerCase(Locale.ROOT), + equalTo(clientFollowerInfo.getStatus().getName().toLowerCase(Locale.ROOT))); + + FollowParameters serverParams = serverFollowInfo.getParameters(); + FollowConfig clientParams = clientFollowerInfo.getParameters(); + if (serverParams != null) { + assertThat(serverParams.getMaxReadRequestOperationCount(), equalTo(clientParams.getMaxReadRequestOperationCount())); + assertThat(serverParams.getMaxWriteRequestOperationCount(), equalTo(clientParams.getMaxWriteRequestOperationCount())); + assertThat(serverParams.getMaxOutstandingReadRequests(), equalTo(clientParams.getMaxOutstandingReadRequests())); + assertThat(serverParams.getMaxOutstandingWriteRequests(), equalTo(clientParams.getMaxOutstandingWriteRequests())); + assertThat(serverParams.getMaxReadRequestSize(), equalTo(clientParams.getMaxReadRequestSize())); 
+ assertThat(serverParams.getMaxWriteRequestSize(), equalTo(clientParams.getMaxWriteRequestSize())); + assertThat(serverParams.getMaxWriteBufferCount(), equalTo(clientParams.getMaxWriteBufferCount())); + assertThat(serverParams.getMaxWriteBufferSize(), equalTo(clientParams.getMaxWriteBufferSize())); + assertThat(serverParams.getMaxRetryDelay(), equalTo(clientParams.getMaxRetryDelay())); + assertThat(serverParams.getReadPollTimeout(), equalTo(clientParams.getReadPollTimeout())); + } else { + assertThat(clientParams, nullValue()); + } } - return new FollowInfoResponse(infos); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java index 5ec3cb4edcf..cd7257342c7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java @@ -20,234 +20,115 @@ package org.elasticsearch.client.ccr; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import java.io.IOException; import java.util.List; import java.util.Map; -import java.util.concurrent.TimeUnit; +import java.util.TreeMap; -import static org.elasticsearch.client.ccr.CcrStatsResponseTests.randomIndicesFollowStats; -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; +import static org.elasticsearch.client.ccr.CcrStatsResponseTests.createStatsResponse; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class FollowStatsResponseTests extends ESTestCase { +public class FollowStatsResponseTests extends AbstractResponseTestCase { - public void testFromXContent() throws IOException { - xContentTester(this::createParser, - FollowStatsResponseTests::createTestInstance, - FollowStatsResponseTests::toXContent, - FollowStatsResponse::fromXContent) - .supportsUnknownFields(true) - .assertEqualsConsumer(FollowStatsResponseTests::assertEqualInstances) - .assertToXContentEquivalence(false) - .test(); + @Override + protected FollowStatsAction.StatsResponses createServerTestInstance() { + return createStatsResponse(); } - // Needed, because exceptions in IndicesFollowStats cannot be compared - private static void assertEqualInstances(FollowStatsResponse expectedInstance, FollowStatsResponse newInstance) { - assertNotSame(expectedInstance, newInstance); - { - IndicesFollowStats newIndicesFollowStats = newInstance.getIndicesFollowStats(); - IndicesFollowStats expectedIndicesFollowStats = expectedInstance.getIndicesFollowStats(); - assertThat(newIndicesFollowStats.getShardFollowStats().size(), - 
equalTo(expectedIndicesFollowStats.getShardFollowStats().size())); - assertThat(newIndicesFollowStats.getShardFollowStats().keySet(), - equalTo(expectedIndicesFollowStats.getShardFollowStats().keySet())); - for (Map.Entry> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) { - List newStats = indexEntry.getValue(); - List expectedStats = expectedIndicesFollowStats.getShardFollowStats(indexEntry.getKey()); - assertThat(newStats.size(), equalTo(expectedStats.size())); - for (int i = 0; i < newStats.size(); i++) { - ShardFollowStats actualShardFollowStats = newStats.get(i); - ShardFollowStats expectedShardFollowStats = expectedStats.get(i); + @Override + protected FollowStatsResponse doParseToClientInstance(XContentParser parser) throws IOException { + return FollowStatsResponse.fromXContent(parser); + } - assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster())); - assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.getLeaderIndex())); - assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.getFollowerIndex())); - assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId())); - assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(), - equalTo(expectedShardFollowStats.getLeaderGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.getLeaderMaxSeqNo())); - assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(), - equalTo(expectedShardFollowStats.getFollowerGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.getLastRequestedSeqNo())); - assertThat(actualShardFollowStats.getOutstandingReadRequests(), - equalTo(expectedShardFollowStats.getOutstandingReadRequests())); - assertThat(actualShardFollowStats.getOutstandingWriteRequests(), - equalTo(expectedShardFollowStats.getOutstandingWriteRequests())); - assertThat(actualShardFollowStats.getWriteBufferOperationCount(), - equalTo(expectedShardFollowStats.getWriteBufferOperationCount())); - assertThat(actualShardFollowStats.getFollowerMappingVersion(), - equalTo(expectedShardFollowStats.getFollowerMappingVersion())); - assertThat(actualShardFollowStats.getFollowerSettingsVersion(), - equalTo(expectedShardFollowStats.getFollowerSettingsVersion())); - assertThat(actualShardFollowStats.getTotalReadTimeMillis(), - equalTo(expectedShardFollowStats.getTotalReadTimeMillis())); - assertThat(actualShardFollowStats.getSuccessfulReadRequests(), - equalTo(expectedShardFollowStats.getSuccessfulReadRequests())); - assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.getFailedReadRequests())); - assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.getOperationsReads())); - assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.getBytesRead())); - assertThat(actualShardFollowStats.getTotalWriteTimeMillis(), - equalTo(expectedShardFollowStats.getTotalWriteTimeMillis())); - assertThat(actualShardFollowStats.getSuccessfulWriteRequests(), - equalTo(expectedShardFollowStats.getSuccessfulWriteRequests())); - assertThat(actualShardFollowStats.getFailedWriteRequests(), - equalTo(expectedShardFollowStats.getFailedWriteRequests())); - assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.getOperationWritten())); - 
assertThat(actualShardFollowStats.getReadExceptions().size(), - equalTo(expectedShardFollowStats.getReadExceptions().size())); - assertThat(actualShardFollowStats.getReadExceptions().keySet(), - equalTo(expectedShardFollowStats.getReadExceptions().keySet())); - for (final Map.Entry> entry : - actualShardFollowStats.getReadExceptions().entrySet()) { - final Tuple expectedTuple = - expectedShardFollowStats.getReadExceptions().get(entry.getKey()); - assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1())); - // x-content loses the exception - final ElasticsearchException expected = expectedTuple.v2(); - assertThat(entry.getValue().v2().getMessage(), containsString(expected.getMessage())); - assertNotNull(entry.getValue().v2().getCause()); - assertThat( - entry.getValue().v2().getCause(), - anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class))); - assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage())); - } - assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(), - equalTo(expectedShardFollowStats.getTimeSinceLastReadMillis())); + @Override + protected void assertInstances(FollowStatsAction.StatsResponses serverTestInstance, FollowStatsResponse clientInstance) { + IndicesFollowStats newIndicesFollowStats = clientInstance.getIndicesFollowStats(); + + // sort by index name, then shard ID + final Map> expectedIndicesFollowStats = new TreeMap<>(); + for (final FollowStatsAction.StatsResponse statsResponse : serverTestInstance.getStatsResponses()) { + expectedIndicesFollowStats.computeIfAbsent( + statsResponse.status().followerIndex(), + k -> new TreeMap<>()).put(statsResponse.status().getShardId(), statsResponse); + } + assertThat(newIndicesFollowStats.getShardFollowStats().size(), + equalTo(expectedIndicesFollowStats.size())); + assertThat(newIndicesFollowStats.getShardFollowStats().keySet(), + equalTo(expectedIndicesFollowStats.keySet())); + for (Map.Entry> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) { + List newStats = indexEntry.getValue(); + Map expectedStats = expectedIndicesFollowStats.get(indexEntry.getKey()); + assertThat(newStats.size(), equalTo(expectedStats.size())); + for (int i = 0; i < newStats.size(); i++) { + ShardFollowStats actualShardFollowStats = newStats.get(i); + ShardFollowNodeTaskStatus expectedShardFollowStats = expectedStats.get(actualShardFollowStats.getShardId()).status(); + + assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster())); + assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.leaderIndex())); + assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.followerIndex())); + assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId())); + assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(), + equalTo(expectedShardFollowStats.leaderGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.leaderMaxSeqNo())); + assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(), + equalTo(expectedShardFollowStats.followerGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.lastRequestedSeqNo())); + assertThat(actualShardFollowStats.getOutstandingReadRequests(), + equalTo(expectedShardFollowStats.outstandingReadRequests())); + 
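The TreeMap-of-TreeMaps built just above is what lets each client-side ShardFollowStats be matched to its server-side counterpart by follower index and shard id, with both key sets in a deterministic sorted order for the keySet comparisons. The same computeIfAbsent grouping idiom in isolation, using a hypothetical stand-in type for the per-shard response:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public final class GroupByIndexAndShard {

    // Hypothetical stand-in for a per-shard server response (e.g. FollowStatsAction.StatsResponse).
    static final class ShardStats {
        final String followerIndex;
        final int shardId;

        ShardStats(String followerIndex, int shardId) {
            this.followerIndex = followerIndex;
            this.shardId = shardId;
        }
    }

    // Groups a flat list into followerIndex -> shardId -> stats, with both levels sorted,
    // mirroring the TreeMap/computeIfAbsent construction used in the assertions above.
    static Map<String, Map<Integer, ShardStats>> group(List<ShardStats> responses) {
        final Map<String, Map<Integer, ShardStats>> grouped = new TreeMap<>();
        for (ShardStats stats : responses) {
            grouped.computeIfAbsent(stats.followerIndex, k -> new TreeMap<>()).put(stats.shardId, stats);
        }
        return grouped;
    }

    public static void main(String[] args) {
        List<ShardStats> flat = new ArrayList<>();
        flat.add(new ShardStats("follower-b", 1));
        flat.add(new ShardStats("follower-a", 0));
        flat.add(new ShardStats("follower-a", 1));
        // Prints [follower-a, follower-b]: indices (and, inside each, shard ids) come back sorted.
        System.out.println(group(flat).keySet());
    }
}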
assertThat(actualShardFollowStats.getOutstandingWriteRequests(), + equalTo(expectedShardFollowStats.outstandingWriteRequests())); + assertThat(actualShardFollowStats.getWriteBufferOperationCount(), + equalTo(expectedShardFollowStats.writeBufferOperationCount())); + assertThat(actualShardFollowStats.getFollowerMappingVersion(), + equalTo(expectedShardFollowStats.followerMappingVersion())); + assertThat(actualShardFollowStats.getFollowerSettingsVersion(), + equalTo(expectedShardFollowStats.followerSettingsVersion())); + assertThat(actualShardFollowStats.getTotalReadTimeMillis(), + equalTo(expectedShardFollowStats.totalReadTimeMillis())); + assertThat(actualShardFollowStats.getSuccessfulReadRequests(), + equalTo(expectedShardFollowStats.successfulReadRequests())); + assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.failedReadRequests())); + assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.operationsReads())); + assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.bytesRead())); + assertThat(actualShardFollowStats.getTotalWriteTimeMillis(), + equalTo(expectedShardFollowStats.totalWriteTimeMillis())); + assertThat(actualShardFollowStats.getSuccessfulWriteRequests(), + equalTo(expectedShardFollowStats.successfulWriteRequests())); + assertThat(actualShardFollowStats.getFailedWriteRequests(), + equalTo(expectedShardFollowStats.failedWriteRequests())); + assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.operationWritten())); + assertThat(actualShardFollowStats.getReadExceptions().size(), + equalTo(expectedShardFollowStats.readExceptions().size())); + assertThat(actualShardFollowStats.getReadExceptions().keySet(), + equalTo(expectedShardFollowStats.readExceptions().keySet())); + for (final Map.Entry> entry : + actualShardFollowStats.getReadExceptions().entrySet()) { + final Tuple expectedTuple = + expectedShardFollowStats.readExceptions().get(entry.getKey()); + assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1())); + // x-content loses the exception + final ElasticsearchException expected = expectedTuple.v2(); + assertThat(entry.getValue().v2().getMessage(), containsString(expected.getMessage())); + assertNotNull(entry.getValue().v2().getCause()); + assertThat( + entry.getValue().v2().getCause(), + anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class))); + assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage())); } + assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(), + equalTo(expectedShardFollowStats.timeSinceLastReadMillis())); } } } - private static void toXContent(FollowStatsResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - { - builder.startArray(IndicesFollowStats.INDICES_FIELD.getPreferredName()); - for (Map.Entry> indexEntry : - response.getIndicesFollowStats().getShardFollowStats().entrySet()) { - builder.startObject(); - { - builder.field(IndicesFollowStats.INDEX_FIELD.getPreferredName(), indexEntry.getKey()); - builder.startArray(IndicesFollowStats.SHARDS_FIELD.getPreferredName()); - { - for (ShardFollowStats stats : indexEntry.getValue()) { - builder.startObject(); - { - builder.field(ShardFollowStats.LEADER_CLUSTER.getPreferredName(), stats.getRemoteCluster()); - builder.field(ShardFollowStats.LEADER_INDEX.getPreferredName(), stats.getLeaderIndex()); - 
builder.field(ShardFollowStats.FOLLOWER_INDEX.getPreferredName(), stats.getFollowerIndex()); - builder.field(ShardFollowStats.SHARD_ID.getPreferredName(), stats.getShardId()); - builder.field(ShardFollowStats.LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), - stats.getLeaderGlobalCheckpoint()); - builder.field(ShardFollowStats.LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), stats.getLeaderMaxSeqNo()); - builder.field(ShardFollowStats.FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), - stats.getFollowerGlobalCheckpoint()); - builder.field(ShardFollowStats.FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), - stats.getFollowerMaxSeqNo()); - builder.field(ShardFollowStats.LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), - stats.getLastRequestedSeqNo()); - builder.field(ShardFollowStats.OUTSTANDING_READ_REQUESTS.getPreferredName(), - stats.getOutstandingReadRequests()); - builder.field(ShardFollowStats.OUTSTANDING_WRITE_REQUESTS.getPreferredName(), - stats.getOutstandingWriteRequests()); - builder.field(ShardFollowStats.WRITE_BUFFER_OPERATION_COUNT_FIELD.getPreferredName(), - stats.getWriteBufferOperationCount()); - builder.humanReadableField( - ShardFollowStats.WRITE_BUFFER_SIZE_IN_BYTES_FIELD.getPreferredName(), - "write_buffer_size", - new ByteSizeValue(stats.getWriteBufferSizeInBytes())); - builder.field(ShardFollowStats.FOLLOWER_MAPPING_VERSION_FIELD.getPreferredName(), - stats.getFollowerMappingVersion()); - builder.field(ShardFollowStats.FOLLOWER_SETTINGS_VERSION_FIELD.getPreferredName(), - stats.getFollowerSettingsVersion()); - builder.humanReadableField( - ShardFollowStats.TOTAL_READ_TIME_MILLIS_FIELD.getPreferredName(), - "total_read_time", - new TimeValue(stats.getTotalReadTimeMillis(), TimeUnit.MILLISECONDS)); - builder.humanReadableField( - ShardFollowStats.TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD.getPreferredName(), - "total_read_remote_exec_time", - new TimeValue(stats.getTotalReadRemoteExecTimeMillis(), TimeUnit.MILLISECONDS)); - builder.field(ShardFollowStats.SUCCESSFUL_READ_REQUESTS_FIELD.getPreferredName(), - stats.getSuccessfulReadRequests()); - builder.field(ShardFollowStats.FAILED_READ_REQUESTS_FIELD.getPreferredName(), - stats.getFailedReadRequests()); - builder.field(ShardFollowStats.OPERATIONS_READ_FIELD.getPreferredName(), stats.getOperationsReads()); - builder.humanReadableField( - ShardFollowStats.BYTES_READ.getPreferredName(), - "total_read", - new ByteSizeValue(stats.getBytesRead(), ByteSizeUnit.BYTES)); - builder.humanReadableField( - ShardFollowStats.TOTAL_WRITE_TIME_MILLIS_FIELD.getPreferredName(), - "total_write_time", - new TimeValue(stats.getTotalWriteTimeMillis(), TimeUnit.MILLISECONDS)); - builder.field(ShardFollowStats.SUCCESSFUL_WRITE_REQUESTS_FIELD.getPreferredName(), - stats.getSuccessfulWriteRequests()); - builder.field(ShardFollowStats.FAILED_WRITE_REQUEST_FIELD.getPreferredName(), - stats.getFailedWriteRequests()); - builder.field(ShardFollowStats.OPERATIONS_WRITTEN.getPreferredName(), stats.getOperationWritten()); - builder.startArray(ShardFollowStats.READ_EXCEPTIONS.getPreferredName()); - { - for (final Map.Entry> entry : - stats.getReadExceptions().entrySet()) { - builder.startObject(); - { - builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), - entry.getKey()); - builder.field(ShardFollowStats.READ_EXCEPTIONS_RETRIES.getPreferredName(), - entry.getValue().v1()); - builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); - builder.startObject(); - { - 
ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, - entry.getValue().v2()); - } - builder.endObject(); - } - builder.endObject(); - } - } - builder.endArray(); - builder.humanReadableField( - ShardFollowStats.TIME_SINCE_LAST_READ_MILLIS_FIELD.getPreferredName(), - "time_since_last_read", - new TimeValue(stats.getTimeSinceLastReadMillis(), TimeUnit.MILLISECONDS)); - if (stats.getFatalException() != null) { - builder.field(ShardFollowStats.FATAL_EXCEPTION.getPreferredName()); - builder.startObject(); - { - ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, - stats.getFatalException()); - } - builder.endObject(); - } - } - builder.endObject(); - } - } - builder.endArray(); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); - } - - private static FollowStatsResponse createTestInstance() { - return new FollowStatsResponse(randomIndicesFollowStats()); - } - } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java index f6f0f1747e2..65ef3aa062d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java @@ -19,99 +19,111 @@ package org.elasticsearch.client.ccr; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; +import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; -import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD; -import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD; -import static org.elasticsearch.client.ccr.PutFollowRequest.REMOTE_CLUSTER_FIELD; -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; -public class GetAutoFollowPatternResponseTests extends ESTestCase { +public class GetAutoFollowPatternResponseTests extends AbstractResponseTestCase< + GetAutoFollowPatternAction.Response, + GetAutoFollowPatternResponse> { - public void testFromXContent() throws IOException { - xContentTester(this::createParser, - this::createTestInstance, - GetAutoFollowPatternResponseTests::toXContent, - GetAutoFollowPatternResponse::fromXContent) - .supportsUnknownFields(true) - .test(); - } - - private GetAutoFollowPatternResponse createTestInstance() { + @Override + protected GetAutoFollowPatternAction.Response createServerTestInstance() { int numPatterns = randomIntBetween(0, 16); - NavigableMap patterns = new TreeMap<>(); + NavigableMap patterns = new TreeMap<>(); for (int i = 0; i < numPatterns; i++) { - GetAutoFollowPatternResponse.Pattern pattern = new GetAutoFollowPatternResponse.Pattern( - randomAlphaOfLength(4), 
Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4)); + String remoteCluster = randomAlphaOfLength(4); + List leaderIndexPatters = Collections.singletonList(randomAlphaOfLength(4)); + String followIndexNamePattern = randomAlphaOfLength(4); + + Integer maxOutstandingReadRequests = null; if (randomBoolean()) { - pattern.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); + maxOutstandingReadRequests = randomIntBetween(0, Integer.MAX_VALUE); } + Integer maxOutstandingWriteRequests = null; if (randomBoolean()) { - pattern.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE)); + maxOutstandingWriteRequests = randomIntBetween(0, Integer.MAX_VALUE); } + Integer maxReadRequestOperationCount = null; if (randomBoolean()) { - pattern.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + maxReadRequestOperationCount = randomIntBetween(0, Integer.MAX_VALUE); } + ByteSizeValue maxReadRequestSize = null; if (randomBoolean()) { - pattern.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong())); + maxReadRequestSize = new ByteSizeValue(randomNonNegativeLong()); } + Integer maxWriteBufferCount = null; if (randomBoolean()) { - pattern.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); + maxWriteBufferCount = randomIntBetween(0, Integer.MAX_VALUE); } + ByteSizeValue maxWriteBufferSize = null; if (randomBoolean()) { - pattern.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); + maxWriteBufferSize = new ByteSizeValue(randomNonNegativeLong()); } + Integer maxWriteRequestOperationCount = null; if (randomBoolean()) { - pattern.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + maxWriteRequestOperationCount = randomIntBetween(0, Integer.MAX_VALUE); } + ByteSizeValue maxWriteRequestSize = null; if (randomBoolean()) { - pattern.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + maxWriteRequestSize = new ByteSizeValue(randomNonNegativeLong()); } + TimeValue maxRetryDelay = null; if (randomBoolean()) { - pattern.setMaxRetryDelay(new TimeValue(randomNonNegativeLong())); + maxRetryDelay = new TimeValue(randomNonNegativeLong()); } + TimeValue readPollTimeout = null; if (randomBoolean()) { - pattern.setReadPollTimeout(new TimeValue(randomNonNegativeLong())); + readPollTimeout = new TimeValue(randomNonNegativeLong()); } - patterns.put(randomAlphaOfLength(4), pattern); + patterns.put(randomAlphaOfLength(4), new AutoFollowMetadata.AutoFollowPattern(remoteCluster, leaderIndexPatters, + followIndexNamePattern, maxReadRequestOperationCount, maxWriteRequestOperationCount, maxOutstandingReadRequests, + maxOutstandingWriteRequests, maxReadRequestSize, maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize, + maxRetryDelay, readPollTimeout)); } - return new GetAutoFollowPatternResponse(patterns); + return new GetAutoFollowPatternAction.Response(patterns); } - public static void toXContent(GetAutoFollowPatternResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - { - builder.startArray(GetAutoFollowPatternResponse.PATTERNS_FIELD.getPreferredName()); - for (Map.Entry entry : response.getPatterns().entrySet()) { - builder.startObject(); - { - builder.field(GetAutoFollowPatternResponse.NAME_FIELD.getPreferredName(), entry.getKey()); - builder.startObject(GetAutoFollowPatternResponse.PATTERN_FIELD.getPreferredName()); - { - GetAutoFollowPatternResponse.Pattern pattern = entry.getValue(); - 
builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), pattern.getRemoteCluster()); - builder.field(LEADER_PATTERNS_FIELD.getPreferredName(), pattern.getLeaderIndexPatterns()); - if (pattern.getFollowIndexNamePattern()!= null) { - builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), pattern.getFollowIndexNamePattern()); - } - entry.getValue().toXContentFragment(builder, ToXContent.EMPTY_PARAMS); - } - builder.endObject(); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); + @Override + protected GetAutoFollowPatternResponse doParseToClientInstance(XContentParser parser) throws IOException { + return GetAutoFollowPatternResponse.fromXContent(parser); } + + @Override + protected void assertInstances(GetAutoFollowPatternAction.Response serverTestInstance, GetAutoFollowPatternResponse clientInstance) { + assertThat(serverTestInstance.getAutoFollowPatterns().size(), equalTo(clientInstance.getPatterns().size())); + for (Map.Entry entry : serverTestInstance.getAutoFollowPatterns().entrySet()) { + AutoFollowMetadata.AutoFollowPattern serverPattern = entry.getValue(); + GetAutoFollowPatternResponse.Pattern clientPattern = clientInstance.getPatterns().get(entry.getKey()); + assertThat(clientPattern, notNullValue()); + + assertThat(serverPattern.getRemoteCluster(), equalTo(clientPattern.getRemoteCluster())); + assertThat(serverPattern.getLeaderIndexPatterns(), equalTo(clientPattern.getLeaderIndexPatterns())); + assertThat(serverPattern.getFollowIndexPattern(), equalTo(clientPattern.getFollowIndexNamePattern())); + assertThat(serverPattern.getMaxOutstandingReadRequests(), equalTo(clientPattern.getMaxOutstandingReadRequests())); + assertThat(serverPattern.getMaxOutstandingWriteRequests(), equalTo(clientPattern.getMaxOutstandingWriteRequests())); + assertThat(serverPattern.getMaxReadRequestOperationCount(), equalTo(clientPattern.getMaxReadRequestOperationCount())); + assertThat(serverPattern.getMaxWriteRequestOperationCount(), equalTo(clientPattern.getMaxWriteRequestOperationCount())); + assertThat(serverPattern.getMaxReadRequestSize(), equalTo(clientPattern.getMaxReadRequestSize())); + assertThat(serverPattern.getMaxWriteRequestSize(), equalTo(clientPattern.getMaxWriteRequestSize())); + assertThat(serverPattern.getMaxWriteBufferCount(), equalTo(clientPattern.getMaxWriteBufferCount())); + assertThat(serverPattern.getMaxWriteBufferSize(), equalTo(clientPattern.getMaxWriteBufferSize())); + assertThat(serverPattern.getMaxRetryDelay(), equalTo(clientPattern.getMaxRetryDelay())); + assertThat(serverPattern.getReadPollTimeout(), equalTo(clientPattern.getReadPollTimeout())); + } + } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java index 00bcf535f08..52fe70b3a39 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java @@ -19,35 +19,30 @@ package org.elasticsearch.client.ccr; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import java.io.IOException; -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; +import static 
org.hamcrest.Matchers.is;
 
-public class PutFollowResponseTests extends ESTestCase {
+public class PutFollowResponseTests extends AbstractResponseTestCase<PutFollowAction.Response, PutFollowResponse> {
 
-    public void testFromXContent() throws IOException {
-        xContentTester(this::createParser,
-            this::createTestInstance,
-            PutFollowResponseTests::toXContent,
-            PutFollowResponse::fromXContent)
-            .supportsUnknownFields(true)
-            .test();
+    @Override
+    protected PutFollowAction.Response createServerTestInstance() {
+        return new PutFollowAction.Response(randomBoolean(), randomBoolean(), randomBoolean());
     }
 
-    private PutFollowResponse createTestInstance() {
-        return new PutFollowResponse(randomBoolean(), randomBoolean(), randomBoolean());
+    @Override
+    protected PutFollowResponse doParseToClientInstance(XContentParser parser) throws IOException {
+        return PutFollowResponse.fromXContent(parser);
     }
 
-    public static void toXContent(PutFollowResponse response, XContentBuilder builder) throws IOException {
-        builder.startObject();
-        {
-            builder.field(PutFollowResponse.FOLLOW_INDEX_CREATED.getPreferredName(), response.isFollowIndexCreated());
-            builder.field(PutFollowResponse.FOLLOW_INDEX_SHARDS_ACKED.getPreferredName(), response.isFollowIndexShardsAcked());
-            builder.field(PutFollowResponse.INDEX_FOLLOWING_STARTED.getPreferredName(), response.isIndexFollowingStarted());
-        }
-        builder.endObject();
+    @Override
+    protected void assertInstances(PutFollowAction.Response serverTestInstance, PutFollowResponse clientInstance) {
+        assertThat(serverTestInstance.isFollowIndexCreated(), is(clientInstance.isFollowIndexCreated()));
+        assertThat(serverTestInstance.isFollowIndexShardsAcked(), is(clientInstance.isFollowIndexShardsAcked()));
+        assertThat(serverTestInstance.isIndexFollowingStarted(), is(clientInstance.isIndexFollowingStarted()));
     }
 }

From ddf17dfb1e7699c90006fa2c8665334a2e1a4e76 Mon Sep 17 00:00:00 2001
From: Marios Trivyzas
Date: Mon, 8 Apr 2019 10:13:15 +0200
Subject: [PATCH 24/45] SQL: Small code improvements of Pipes & Processors (#40909)

- Remove superfluous methods that are already defined in superclasses.
- Improve tests for null folding on conditionals (cherry picked from commit 67f9404f5004362e569353d1e950ffe5d7a9ab6e) --- .../sql/expression/gen/pipeline/Pipe.java | 12 ++--- .../predicate/conditional/NullIf.java | 19 +------- .../conditional/NullIfProcessor.java | 8 +++- .../predicate/operator/comparison/InPipe.java | 48 ++++--------------- .../xpack/sql/optimizer/OptimizerTests.java | 35 ++++++++++---- 5 files changed, 45 insertions(+), 77 deletions(-) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/Pipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/Pipe.java index b013714bf5a..675cde43c45 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/Pipe.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/Pipe.java @@ -12,8 +12,8 @@ import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.tree.Source; import java.util.ArrayList; import java.util.List; @@ -53,13 +53,7 @@ public abstract class Pipe extends Node implements FieldExtraction, Resolv @Override public boolean supportedByAggsOnlyQuery() { - for (Pipe pipe : children()) { - if (pipe.supportedByAggsOnlyQuery()) { - return true; - } - } - - return false; + return children().stream().anyMatch(Pipe::supportedByAggsOnlyQuery); } public abstract Processor asProcessor(); @@ -83,4 +77,4 @@ public abstract class Pipe extends Node implements FieldExtraction, Resolv public interface AttributeResolver { FieldExtraction resolve(Attribute attribute); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIf.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIf.java index ef1a71c7ed4..dac5add1792 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIf.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIf.java @@ -8,13 +8,11 @@ package org.elasticsearch.xpack.sql.expression.predicate.conditional; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; -import org.elasticsearch.xpack.sql.expression.Nullability; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; -import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.tree.Source; import java.util.Arrays; import java.util.List; @@ -47,21 +45,6 @@ public class NullIf extends ConditionalFunction { return TypeResolution.TYPE_RESOLVED; } - @Override - public DataType dataType() { - return dataType; - } - - @Override - public boolean foldable() { - return Expressions.foldable(children()); - } - - @Override - public Nullability nullable() { - return Nullability.UNKNOWN; - } - @Override public Object fold() { return 
NullIfProcessor.apply(children().get(0).fold(), children().get(1).fold()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIfProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIfProcessor.java index 89d0b01ffed..06bc04eae24 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIfProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIfProcessor.java @@ -59,8 +59,12 @@ public class NullIfProcessor implements Processor { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } NullIfProcessor that = (NullIfProcessor) o; return Objects.equals(leftProcessor, that.leftProcessor) && Objects.equals(rightProcessor, that.rightProcessor); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InPipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InPipe.java index 77c6d2bdc76..fa7c06a5e3f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InPipe.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InPipe.java @@ -5,26 +5,20 @@ */ package org.elasticsearch.xpack.sql.expression.predicate.operator.comparison; -import org.elasticsearch.xpack.sql.capabilities.Resolvables; -import org.elasticsearch.xpack.sql.execution.search.FieldExtraction; -import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.MultiPipe; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; -import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; -import java.util.ArrayList; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; -public class InPipe extends Pipe { - - private List pipes; +public class InPipe extends MultiPipe { public InPipe(Source source, Expression expression, List pipes) { super(source, expression, pipes); - this.pipes = pipes; } @Override @@ -37,36 +31,12 @@ public class InPipe extends Pipe { @Override protected NodeInfo info() { - return NodeInfo.create(this, InPipe::new, expression(), pipes); - } - - @Override - public boolean supportedByAggsOnlyQuery() { - return pipes.stream().allMatch(FieldExtraction::supportedByAggsOnlyQuery); - } - - @Override - public final Pipe resolveAttributes(AttributeResolver resolver) { - List newPipes = new ArrayList<>(pipes.size()); - for (Pipe p : pipes) { - newPipes.add(p.resolveAttributes(resolver)); - } - return replaceChildren(newPipes); - } - - @Override - public boolean resolved() { - return Resolvables.resolved(pipes); - } - - @Override - public final void collectFields(SqlSourceBuilder sourceBuilder) { - pipes.forEach(p -> p.collectFields(sourceBuilder)); + return NodeInfo.create(this, InPipe::new, expression(), children()); } @Override public int hashCode() { - return 
Objects.hash(pipes); + return Objects.hash(children()); } @Override @@ -80,11 +50,11 @@ public class InPipe extends Pipe { } InPipe other = (InPipe) obj; - return Objects.equals(pipes, other.pipes); + return Objects.equals(children(), other.children()); } @Override - public InProcessor asProcessor() { - return new InProcessor(pipes.stream().map(Pipe::asProcessor).collect(Collectors.toList())); + public InProcessor asProcessor(List processors) { + return new InProcessor(processors); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index 7c01fd8ff15..8e4c9c7dd59 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -43,7 +43,9 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Repeat; import org.elasticsearch.xpack.sql.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.sql.expression.predicate.Range; +import org.elasticsearch.xpack.sql.expression.predicate.conditional.ArbitraryConditionalFunction; import org.elasticsearch.xpack.sql.expression.predicate.conditional.Coalesce; +import org.elasticsearch.xpack.sql.expression.predicate.conditional.ConditionalFunction; import org.elasticsearch.xpack.sql.expression.predicate.conditional.Greatest; import org.elasticsearch.xpack.sql.expression.predicate.conditional.IfNull; import org.elasticsearch.xpack.sql.expression.predicate.conditional.Least; @@ -97,6 +99,7 @@ import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.util.CollectionUtils; import org.elasticsearch.xpack.sql.util.StringUtils; +import java.lang.reflect.Constructor; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -445,18 +448,32 @@ public class OptimizerTests extends ESTestCase { assertEquals(and, rule.rule(and)); } - public void testNullFoldingDoesNotApplyOnConditionals() { + @SuppressWarnings("unchecked") + public void testNullFoldingDoesNotApplyOnConditionals() throws Exception { FoldNull rule = new FoldNull(); - Coalesce coalesce = new Coalesce(EMPTY, Arrays.asList(Literal.NULL, ONE, TWO)); - assertEquals(coalesce, rule.rule(coalesce)); - coalesce = new Coalesce(EMPTY, Arrays.asList(Literal.NULL, NULL, NULL)); - assertEquals(coalesce, rule.rule(coalesce)); + Class clazz = + (Class) randomFrom(IfNull.class, NullIf.class); + Constructor ctor = clazz.getConstructor(Source.class, Expression.class, Expression.class); + ConditionalFunction conditionalFunction = ctor.newInstance(EMPTY, Literal.NULL, ONE); + assertEquals(conditionalFunction, rule.rule(conditionalFunction)); + conditionalFunction = ctor.newInstance(EMPTY, ONE, Literal.NULL); + assertEquals(conditionalFunction, rule.rule(conditionalFunction)); + conditionalFunction = ctor.newInstance(EMPTY, Literal.NULL, Literal.NULL); + assertEquals(conditionalFunction, rule.rule(conditionalFunction)); + } - Greatest greatest = new Greatest(EMPTY, Arrays.asList(Literal.NULL, ONE, TWO)); - assertEquals(greatest, rule.rule(greatest)); - greatest = new Greatest(EMPTY, Arrays.asList(Literal.NULL, ONE, TWO)); - assertEquals(greatest, rule.rule(greatest)); + @SuppressWarnings("unchecked") + public void testNullFoldingDoesNotApplyOnArbitraryConditionals() throws Exception { + 
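        // Picks one of the variadic conditionals (COALESCE / GREATEST / LEAST) at random, instantiates it
        // reflectively through its (Source, List<Expression>) constructor, and verifies that the FoldNull
        // optimizer rule leaves the expression untouched even when every argument is the NULL literal:
        // these functions define their own null semantics, so folding the whole expression to NULL here
        // would change query results.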
FoldNull rule = new FoldNull(); + + Class clazz = + (Class) randomFrom(Coalesce.class, Greatest.class, Least.class); + Constructor ctor = clazz.getConstructor(Source.class, List.class); + ArbitraryConditionalFunction conditionalFunction = ctor.newInstance(EMPTY, Arrays.asList(Literal.NULL, ONE, TWO)); + assertEquals(conditionalFunction, rule.rule(conditionalFunction)); + conditionalFunction = ctor.newInstance(EMPTY, Arrays.asList(Literal.NULL, NULL, NULL)); + assertEquals(conditionalFunction, rule.rule(conditionalFunction)); } public void testSimplifyCoalesceNulls() { From 84a410d5a89d7ddaaf0ea9f4ecf66a79ddd8421c Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 8 Apr 2019 11:24:06 +0200 Subject: [PATCH 25/45] Revert "Change HLRC CCR response tests to use AbstractResponseTestCase base class. (#40257)" This reverts commit c29027d99e969d464fcb461921ae5b0de6f7d086. --- client/rest-high-level/build.gradle | 3 - .../client/ccr/CcrStatsResponseTests.java | 411 ++++++++++++------ .../client/ccr/FollowInfoResponseTests.java | 118 ++--- .../client/ccr/FollowStatsResponseTests.java | 299 +++++++++---- .../GetAutoFollowPatternResponseTests.java | 126 +++--- .../client/ccr/PutFollowResponseTests.java | 37 +- 6 files changed, 621 insertions(+), 373 deletions(-) diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 420bd6d7414..44262f09346 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -64,9 +64,6 @@ dependencies { testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" //this is needed to make RestHighLevelClientTests#testApiNamingConventions work from IDEs testCompile "org.elasticsearch:rest-api-spec:${version}" - // Needed for serialization tests: - // (In order to serialize a server side class to a client side class or the other way around) - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" restSpec "org.elasticsearch:rest-api-spec:${version}" } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java index d56b762520c..cb8072f6baf 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/CcrStatsResponseTests.java @@ -20,110 +20,50 @@ package org.elasticsearch.client.ccr; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.client.ccr.AutoFollowStats.AutoFollowedCluster; import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; -import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; +import 
java.util.concurrent.TimeUnit; +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class CcrStatsResponseTests extends AbstractResponseTestCase { +public class CcrStatsResponseTests extends ESTestCase { - @Override - protected CcrStatsAction.Response createServerTestInstance() { - org.elasticsearch.xpack.core.ccr.AutoFollowStats autoFollowStats = new org.elasticsearch.xpack.core.ccr.AutoFollowStats( - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomReadExceptions(), - randomTrackingClusters() - ); - FollowStatsAction.StatsResponses statsResponse = createStatsResponse(); - return new CcrStatsAction.Response(autoFollowStats, statsResponse); + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + CcrStatsResponseTests::createTestInstance, + CcrStatsResponseTests::toXContent, + CcrStatsResponse::fromXContent) + .supportsUnknownFields(true) + .assertEqualsConsumer(CcrStatsResponseTests::assertEqualInstances) + .assertToXContentEquivalence(false) + .test(); } - static NavigableMap> randomReadExceptions() { - final int count = randomIntBetween(0, 16); - final NavigableMap> readExceptions = new TreeMap<>(); - for (int i = 0; i < count; i++) { - readExceptions.put("" + i, Tuple.tuple(randomNonNegativeLong(), - new ElasticsearchException(new IllegalStateException("index [" + i + "]")))); - } - return readExceptions; - } + // Needed, because exceptions in IndicesFollowStats and AutoFollowStats cannot be compared + private static void assertEqualInstances(CcrStatsResponse expectedInstance, CcrStatsResponse newInstance) { + assertNotSame(expectedInstance, newInstance); - static NavigableMap randomTrackingClusters() { - final int count = randomIntBetween(0, 16); - final NavigableMap readExceptions = new TreeMap<>(); - for (int i = 0; i < count; i++) { - readExceptions.put("" + i, - new org.elasticsearch.xpack.core.ccr.AutoFollowStats.AutoFollowedCluster(randomLong(), randomNonNegativeLong())); - } - return readExceptions; - } - - static FollowStatsAction.StatsResponses createStatsResponse() { - int numResponses = randomIntBetween(0, 8); - List responses = new ArrayList<>(numResponses); - for (int i = 0; i < numResponses; i++) { - ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus( - randomAlphaOfLength(4), - randomAlphaOfLength(4), - randomAlphaOfLength(4), - randomInt(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomIntBetween(0, Integer.MAX_VALUE), - randomIntBetween(0, Integer.MAX_VALUE), - randomIntBetween(0, Integer.MAX_VALUE), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - Collections.emptyNavigableMap(), - randomLong(), - randomBoolean() ? 
new ElasticsearchException("fatal error") : null); - responses.add(new FollowStatsAction.StatsResponse(status)); - } - return new FollowStatsAction.StatsResponses(Collections.emptyList(), Collections.emptyList(), responses); - } - - @Override - protected CcrStatsResponse doParseToClientInstance(XContentParser parser) throws IOException { - return CcrStatsResponse.fromXContent(parser); - } - - @Override - protected void assertInstances(CcrStatsAction.Response serverTestInstance, CcrStatsResponse clientInstance) { { - AutoFollowStats newAutoFollowStats = clientInstance.getAutoFollowStats(); - org.elasticsearch.xpack.core.ccr.AutoFollowStats expectedAutoFollowStats = serverTestInstance.getAutoFollowStats(); + AutoFollowStats newAutoFollowStats = newInstance.getAutoFollowStats(); + AutoFollowStats expectedAutoFollowStats = expectedInstance.getAutoFollowStats(); assertThat(newAutoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(expectedAutoFollowStats.getNumberOfSuccessfulFollowIndices())); assertThat(newAutoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), @@ -149,69 +89,62 @@ public class CcrStatsResponseTests extends AbstractResponseTestCase> expectedIndicesFollowStats = new TreeMap<>(); - for (final FollowStatsAction.StatsResponse statsResponse : serverTestInstance.getFollowStats().getStatsResponses()) { - expectedIndicesFollowStats.computeIfAbsent( - statsResponse.status().followerIndex(), - k -> new TreeMap<>()).put(statsResponse.status().getShardId(), statsResponse); - } + IndicesFollowStats newIndicesFollowStats = newInstance.getIndicesFollowStats(); + IndicesFollowStats expectedIndicesFollowStats = expectedInstance.getIndicesFollowStats(); assertThat(newIndicesFollowStats.getShardFollowStats().size(), - equalTo(expectedIndicesFollowStats.size())); + equalTo(expectedIndicesFollowStats.getShardFollowStats().size())); assertThat(newIndicesFollowStats.getShardFollowStats().keySet(), - equalTo(expectedIndicesFollowStats.keySet())); + equalTo(expectedIndicesFollowStats.getShardFollowStats().keySet())); for (Map.Entry> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) { List newStats = indexEntry.getValue(); - Map expectedStats = expectedIndicesFollowStats.get(indexEntry.getKey()); + List expectedStats = expectedIndicesFollowStats.getShardFollowStats(indexEntry.getKey()); assertThat(newStats.size(), equalTo(expectedStats.size())); for (int i = 0; i < newStats.size(); i++) { ShardFollowStats actualShardFollowStats = newStats.get(i); - ShardFollowNodeTaskStatus expectedShardFollowStats = expectedStats.get(actualShardFollowStats.getShardId()).status(); + ShardFollowStats expectedShardFollowStats = expectedStats.get(i); assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster())); - assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.leaderIndex())); - assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.followerIndex())); + assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.getLeaderIndex())); + assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.getFollowerIndex())); assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId())); assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(), - equalTo(expectedShardFollowStats.leaderGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), 
equalTo(expectedShardFollowStats.leaderMaxSeqNo())); + equalTo(expectedShardFollowStats.getLeaderGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.getLeaderMaxSeqNo())); assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(), - equalTo(expectedShardFollowStats.followerGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.lastRequestedSeqNo())); + equalTo(expectedShardFollowStats.getFollowerGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.getLastRequestedSeqNo())); assertThat(actualShardFollowStats.getOutstandingReadRequests(), - equalTo(expectedShardFollowStats.outstandingReadRequests())); + equalTo(expectedShardFollowStats.getOutstandingReadRequests())); assertThat(actualShardFollowStats.getOutstandingWriteRequests(), - equalTo(expectedShardFollowStats.outstandingWriteRequests())); + equalTo(expectedShardFollowStats.getOutstandingWriteRequests())); assertThat(actualShardFollowStats.getWriteBufferOperationCount(), - equalTo(expectedShardFollowStats.writeBufferOperationCount())); + equalTo(expectedShardFollowStats.getWriteBufferOperationCount())); assertThat(actualShardFollowStats.getFollowerMappingVersion(), - equalTo(expectedShardFollowStats.followerMappingVersion())); + equalTo(expectedShardFollowStats.getFollowerMappingVersion())); assertThat(actualShardFollowStats.getFollowerSettingsVersion(), - equalTo(expectedShardFollowStats.followerSettingsVersion())); + equalTo(expectedShardFollowStats.getFollowerSettingsVersion())); assertThat(actualShardFollowStats.getTotalReadTimeMillis(), - equalTo(expectedShardFollowStats.totalReadTimeMillis())); + equalTo(expectedShardFollowStats.getTotalReadTimeMillis())); assertThat(actualShardFollowStats.getSuccessfulReadRequests(), - equalTo(expectedShardFollowStats.successfulReadRequests())); - assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.failedReadRequests())); - assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.operationsReads())); - assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.bytesRead())); + equalTo(expectedShardFollowStats.getSuccessfulReadRequests())); + assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.getFailedReadRequests())); + assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.getOperationsReads())); + assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.getBytesRead())); assertThat(actualShardFollowStats.getTotalWriteTimeMillis(), - equalTo(expectedShardFollowStats.totalWriteTimeMillis())); + equalTo(expectedShardFollowStats.getTotalWriteTimeMillis())); assertThat(actualShardFollowStats.getSuccessfulWriteRequests(), - equalTo(expectedShardFollowStats.successfulWriteRequests())); + equalTo(expectedShardFollowStats.getSuccessfulWriteRequests())); assertThat(actualShardFollowStats.getFailedWriteRequests(), - equalTo(expectedShardFollowStats.failedWriteRequests())); - assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.operationWritten())); + equalTo(expectedShardFollowStats.getFailedWriteRequests())); + assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.getOperationWritten())); assertThat(actualShardFollowStats.getReadExceptions().size(), - 
equalTo(expectedShardFollowStats.readExceptions().size())); + equalTo(expectedShardFollowStats.getReadExceptions().size())); assertThat(actualShardFollowStats.getReadExceptions().keySet(), - equalTo(expectedShardFollowStats.readExceptions().keySet())); + equalTo(expectedShardFollowStats.getReadExceptions().keySet())); for (final Map.Entry> entry : actualShardFollowStats.getReadExceptions().entrySet()) { final Tuple expectedTuple = - expectedShardFollowStats.readExceptions().get(entry.getKey()); + expectedShardFollowStats.getReadExceptions().get(entry.getKey()); assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1())); // x-content loses the exception final ElasticsearchException expected = expectedTuple.v2(); @@ -223,10 +156,246 @@ public class CcrStatsResponseTests extends AbstractResponseTestCase> entry : + autoFollowStats.getRecentAutoFollowErrors().entrySet()) { + builder.startObject(); + { + builder.field(AutoFollowStats.LEADER_INDEX.getPreferredName(), entry.getKey()); + builder.field(AutoFollowStats.TIMESTAMP.getPreferredName(), entry.getValue().v1()); + builder.field(AutoFollowStats.AUTO_FOLLOW_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, entry.getValue().v2()); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + builder.startArray(AutoFollowStats.AUTO_FOLLOWED_CLUSTERS.getPreferredName()); + for (Map.Entry entry : autoFollowStats.getAutoFollowedClusters().entrySet()) { + builder.startObject(); + { + builder.field(AutoFollowStats.CLUSTER_NAME.getPreferredName(), entry.getKey()); + builder.field(AutoFollowStats.TIME_SINCE_LAST_CHECK_MILLIS.getPreferredName(), + entry.getValue().getTimeSinceLastCheckMillis()); + builder.field(AutoFollowStats.LAST_SEEN_METADATA_VERSION.getPreferredName(), + entry.getValue().getLastSeenMetadataVersion()); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + + IndicesFollowStats indicesFollowStats = response.getIndicesFollowStats(); + builder.startObject(CcrStatsResponse.FOLLOW_STATS_FIELD.getPreferredName()); + { + builder.startArray(IndicesFollowStats.INDICES_FIELD.getPreferredName()); + for (Map.Entry> indexEntry : + indicesFollowStats.getShardFollowStats().entrySet()) { + builder.startObject(); + { + builder.field(IndicesFollowStats.INDEX_FIELD.getPreferredName(), indexEntry.getKey()); + builder.startArray(IndicesFollowStats.SHARDS_FIELD.getPreferredName()); + { + for (ShardFollowStats stats : indexEntry.getValue()) { + builder.startObject(); + { + builder.field(ShardFollowStats.LEADER_CLUSTER.getPreferredName(), stats.getRemoteCluster()); + builder.field(ShardFollowStats.LEADER_INDEX.getPreferredName(), stats.getLeaderIndex()); + builder.field(ShardFollowStats.FOLLOWER_INDEX.getPreferredName(), stats.getFollowerIndex()); + builder.field(ShardFollowStats.SHARD_ID.getPreferredName(), stats.getShardId()); + builder.field(ShardFollowStats.LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), + stats.getLeaderGlobalCheckpoint()); + builder.field(ShardFollowStats.LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), stats.getLeaderMaxSeqNo()); + builder.field(ShardFollowStats.FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), + stats.getFollowerGlobalCheckpoint()); + builder.field(ShardFollowStats.FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), + stats.getFollowerMaxSeqNo()); + builder.field(ShardFollowStats.LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), + stats.getLastRequestedSeqNo()); + 
builder.field(ShardFollowStats.OUTSTANDING_READ_REQUESTS.getPreferredName(), + stats.getOutstandingReadRequests()); + builder.field(ShardFollowStats.OUTSTANDING_WRITE_REQUESTS.getPreferredName(), + stats.getOutstandingWriteRequests()); + builder.field(ShardFollowStats.WRITE_BUFFER_OPERATION_COUNT_FIELD.getPreferredName(), + stats.getWriteBufferOperationCount()); + builder.humanReadableField( + ShardFollowStats.WRITE_BUFFER_SIZE_IN_BYTES_FIELD.getPreferredName(), + "write_buffer_size", + new ByteSizeValue(stats.getWriteBufferSizeInBytes())); + builder.field(ShardFollowStats.FOLLOWER_MAPPING_VERSION_FIELD.getPreferredName(), + stats.getFollowerMappingVersion()); + builder.field(ShardFollowStats.FOLLOWER_SETTINGS_VERSION_FIELD.getPreferredName(), + stats.getFollowerSettingsVersion()); + builder.humanReadableField( + ShardFollowStats.TOTAL_READ_TIME_MILLIS_FIELD.getPreferredName(), + "total_read_time", + new TimeValue(stats.getTotalReadTimeMillis(), TimeUnit.MILLISECONDS)); + builder.humanReadableField( + ShardFollowStats.TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD.getPreferredName(), + "total_read_remote_exec_time", + new TimeValue(stats.getTotalReadRemoteExecTimeMillis(), TimeUnit.MILLISECONDS)); + builder.field(ShardFollowStats.SUCCESSFUL_READ_REQUESTS_FIELD.getPreferredName(), + stats.getSuccessfulReadRequests()); + builder.field(ShardFollowStats.FAILED_READ_REQUESTS_FIELD.getPreferredName(), + stats.getFailedReadRequests()); + builder.field(ShardFollowStats.OPERATIONS_READ_FIELD.getPreferredName(), stats.getOperationsReads()); + builder.humanReadableField( + ShardFollowStats.BYTES_READ.getPreferredName(), + "total_read", + new ByteSizeValue(stats.getBytesRead(), ByteSizeUnit.BYTES)); + builder.humanReadableField( + ShardFollowStats.TOTAL_WRITE_TIME_MILLIS_FIELD.getPreferredName(), + "total_write_time", + new TimeValue(stats.getTotalWriteTimeMillis(), TimeUnit.MILLISECONDS)); + builder.field(ShardFollowStats.SUCCESSFUL_WRITE_REQUESTS_FIELD.getPreferredName(), + stats.getSuccessfulWriteRequests()); + builder.field(ShardFollowStats.FAILED_WRITE_REQUEST_FIELD.getPreferredName(), + stats.getFailedWriteRequests()); + builder.field(ShardFollowStats.OPERATIONS_WRITTEN.getPreferredName(), stats.getOperationWritten()); + builder.startArray(ShardFollowStats.READ_EXCEPTIONS.getPreferredName()); + { + for (final Map.Entry> entry : + stats.getReadExceptions().entrySet()) { + builder.startObject(); + { + builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), + entry.getKey()); + builder.field(ShardFollowStats.READ_EXCEPTIONS_RETRIES.getPreferredName(), + entry.getValue().v1()); + builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, + entry.getValue().v2()); + } + builder.endObject(); + } + builder.endObject(); + } + } + builder.endArray(); + builder.humanReadableField( + ShardFollowStats.TIME_SINCE_LAST_READ_MILLIS_FIELD.getPreferredName(), + "time_since_last_read", + new TimeValue(stats.getTimeSinceLastReadMillis(), TimeUnit.MILLISECONDS)); + if (stats.getFatalException() != null) { + builder.field(ShardFollowStats.FATAL_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, + stats.getFatalException()); + } + builder.endObject(); + } + } + builder.endObject(); + } + } + builder.endArray(); + } + builder.endObject(); + } + builder.endArray(); + 
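Most of the duration and byte-count fields restored in this toXContent helper go through humanReadableField, which always writes the raw millis/bytes field and, when the builder is switched to human-readable output, an extra pretty-printed twin. A minimal standalone sketch of that behaviour (the field names are borrowed from the stats output above purely as an example):

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public final class HumanReadableFieldExample {

    public static void main(String[] args) throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.humanReadable(true); // without this, only the raw field is emitted
        builder.startObject();
        // Emits both "total_read_time_millis": 90000 and "total_read_time": "1.5m" in human-readable
        // mode; with humanReadable(false) only the raw millis field appears.
        builder.humanReadableField("total_read_time_millis", "total_read_time",
            new TimeValue(90_000, TimeUnit.MILLISECONDS));
        builder.endObject();
        System.out.println(Strings.toString(builder));
    }
}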
} + builder.endObject(); + } + builder.endObject(); + } + + private static CcrStatsResponse createTestInstance() { + return new CcrStatsResponse(randomAutoFollowStats(), randomIndicesFollowStats()); + } + + private static AutoFollowStats randomAutoFollowStats() { + final int count = randomIntBetween(0, 16); + final NavigableMap> readExceptions = new TreeMap<>(); + for (int i = 0; i < count; i++) { + readExceptions.put("" + i, Tuple.tuple(randomNonNegativeLong(), + new ElasticsearchException(new IllegalStateException("index [" + i + "]")))); + } + final NavigableMap autoFollowClusters = new TreeMap<>(); + for (int i = 0; i < count; i++) { + autoFollowClusters.put("" + i, new AutoFollowedCluster(randomLong(), randomNonNegativeLong())); + } + return new AutoFollowStats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + readExceptions, + autoFollowClusters + ); + } + + static IndicesFollowStats randomIndicesFollowStats() { + int numIndices = randomIntBetween(0, 16); + NavigableMap> shardFollowStats = new TreeMap<>(); + for (int i = 0; i < numIndices; i++) { + String index = randomAlphaOfLength(4); + int numShards = randomIntBetween(0, 5); + List stats = new ArrayList<>(numShards); + shardFollowStats.put(index, stats); + for (int j = 0; j < numShards; j++) { + final int count = randomIntBetween(0, 16); + final NavigableMap> readExceptions = new TreeMap<>(); + for (long k = 0; k < count; k++) { + readExceptions.put(k, new Tuple<>(randomIntBetween(0, Integer.MAX_VALUE), + new ElasticsearchException(new IllegalStateException("index [" + k + "]")))); + } + + stats.add(new ShardFollowStats( + randomAlphaOfLength(4), + randomAlphaOfLength(4), + randomAlphaOfLength(4), + randomInt(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomLong(), + readExceptions, + randomBoolean() ? 
new ElasticsearchException("fatal error") : null)); + } + } + return new IndicesFollowStats(shardFollowStats); + } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java index 2c5bfba5025..5cd327495dc 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java @@ -19,89 +19,59 @@ package org.elasticsearch.client.ccr; -import org.elasticsearch.client.AbstractResponseTestCase; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction; -import org.elasticsearch.xpack.core.ccr.action.FollowParameters; +import org.elasticsearch.client.ccr.FollowInfoResponse.FollowerInfo; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Locale; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; -public class FollowInfoResponseTests extends AbstractResponseTestCase { +public class FollowInfoResponseTests extends ESTestCase { - @Override - protected FollowInfoAction.Response createServerTestInstance() { - int numInfos = randomIntBetween(0, 32); - List infos = new ArrayList<>(numInfos); + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + FollowInfoResponseTests::createTestInstance, + FollowInfoResponseTests::toXContent, + FollowInfoResponse::fromXContent) + .supportsUnknownFields(true) + .test(); + } + + private static void toXContent(FollowInfoResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.startArray(FollowInfoResponse.FOLLOWER_INDICES_FIELD.getPreferredName()); + for (FollowerInfo info : response.getInfos()) { + builder.startObject(); + builder.field(FollowerInfo.FOLLOWER_INDEX_FIELD.getPreferredName(), info.getFollowerIndex()); + builder.field(FollowerInfo.REMOTE_CLUSTER_FIELD.getPreferredName(), info.getRemoteCluster()); + builder.field(FollowerInfo.LEADER_INDEX_FIELD.getPreferredName(), info.getLeaderIndex()); + builder.field(FollowerInfo.STATUS_FIELD.getPreferredName(), info.getStatus().getName()); + if (info.getParameters() != null) { + builder.startObject(FollowerInfo.PARAMETERS_FIELD.getPreferredName()); + { + info.getParameters().toXContentFragment(builder, ToXContent.EMPTY_PARAMS); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + } + + private static FollowInfoResponse createTestInstance() { + int numInfos = randomIntBetween(0, 64); + List infos = new ArrayList<>(numInfos); for (int i = 0; i < numInfos; i++) { - FollowParameters followParameters = null; - if (randomBoolean()) { - followParameters = randomFollowParameters(); - } - - infos.add(new FollowInfoAction.Response.FollowerInfo(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), - randomFrom(FollowInfoAction.Response.Status.values()), followParameters)); - } - return new 
FollowInfoAction.Response(infos); - } - - static FollowParameters randomFollowParameters() { - FollowParameters followParameters = new FollowParameters(); - followParameters.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); - followParameters.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE)); - followParameters.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); - followParameters.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); - followParameters.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong())); - followParameters.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); - followParameters.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); - followParameters.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); - followParameters.setMaxRetryDelay(new TimeValue(randomNonNegativeLong())); - followParameters.setReadPollTimeout(new TimeValue(randomNonNegativeLong())); - return followParameters; - } - - @Override - protected FollowInfoResponse doParseToClientInstance(XContentParser parser) throws IOException { - return FollowInfoResponse.fromXContent(parser); - } - - @Override - protected void assertInstances(FollowInfoAction.Response serverTestInstance, FollowInfoResponse clientInstance) { - assertThat(serverTestInstance.getFollowInfos().size(), equalTo(clientInstance.getInfos().size())); - for (int i = 0; i < serverTestInstance.getFollowInfos().size(); i++) { - FollowInfoAction.Response.FollowerInfo serverFollowInfo = serverTestInstance.getFollowInfos().get(i); - FollowInfoResponse.FollowerInfo clientFollowerInfo = clientInstance.getInfos().get(i); - - assertThat(serverFollowInfo.getRemoteCluster(), equalTo(clientFollowerInfo.getRemoteCluster())); - assertThat(serverFollowInfo.getLeaderIndex(), equalTo(clientFollowerInfo.getLeaderIndex())); - assertThat(serverFollowInfo.getFollowerIndex(), equalTo(clientFollowerInfo.getFollowerIndex())); - assertThat(serverFollowInfo.getStatus().toString().toLowerCase(Locale.ROOT), - equalTo(clientFollowerInfo.getStatus().getName().toLowerCase(Locale.ROOT))); - - FollowParameters serverParams = serverFollowInfo.getParameters(); - FollowConfig clientParams = clientFollowerInfo.getParameters(); - if (serverParams != null) { - assertThat(serverParams.getMaxReadRequestOperationCount(), equalTo(clientParams.getMaxReadRequestOperationCount())); - assertThat(serverParams.getMaxWriteRequestOperationCount(), equalTo(clientParams.getMaxWriteRequestOperationCount())); - assertThat(serverParams.getMaxOutstandingReadRequests(), equalTo(clientParams.getMaxOutstandingReadRequests())); - assertThat(serverParams.getMaxOutstandingWriteRequests(), equalTo(clientParams.getMaxOutstandingWriteRequests())); - assertThat(serverParams.getMaxReadRequestSize(), equalTo(clientParams.getMaxReadRequestSize())); - assertThat(serverParams.getMaxWriteRequestSize(), equalTo(clientParams.getMaxWriteRequestSize())); - assertThat(serverParams.getMaxWriteBufferCount(), equalTo(clientParams.getMaxWriteBufferCount())); - assertThat(serverParams.getMaxWriteBufferSize(), equalTo(clientParams.getMaxWriteBufferSize())); - assertThat(serverParams.getMaxRetryDelay(), equalTo(clientParams.getMaxRetryDelay())); - assertThat(serverParams.getReadPollTimeout(), equalTo(clientParams.getReadPollTimeout())); - } else { - assertThat(clientParams, nullValue()); - } + FollowInfoResponse.Status status = randomFrom(FollowInfoResponse.Status.values()); + FollowConfig 
followConfig = randomBoolean() ? FollowConfigTests.createTestInstance() : null; + infos.add(new FollowerInfo(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), status, followConfig)); } + return new FollowInfoResponse(infos); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java index cd7257342c7..5ec3cb4edcf 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowStatsResponseTests.java @@ -20,115 +20,234 @@ package org.elasticsearch.client.ccr; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; -import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.List; import java.util.Map; -import java.util.TreeMap; +import java.util.concurrent.TimeUnit; -import static org.elasticsearch.client.ccr.CcrStatsResponseTests.createStatsResponse; +import static org.elasticsearch.client.ccr.CcrStatsResponseTests.randomIndicesFollowStats; +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class FollowStatsResponseTests extends AbstractResponseTestCase { +public class FollowStatsResponseTests extends ESTestCase { - @Override - protected FollowStatsAction.StatsResponses createServerTestInstance() { - return createStatsResponse(); + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + FollowStatsResponseTests::createTestInstance, + FollowStatsResponseTests::toXContent, + FollowStatsResponse::fromXContent) + .supportsUnknownFields(true) + .assertEqualsConsumer(FollowStatsResponseTests::assertEqualInstances) + .assertToXContentEquivalence(false) + .test(); } - @Override - protected FollowStatsResponse doParseToClientInstance(XContentParser parser) throws IOException { - return FollowStatsResponse.fromXContent(parser); - } + // Needed, because exceptions in IndicesFollowStats cannot be compared + private static void assertEqualInstances(FollowStatsResponse expectedInstance, FollowStatsResponse newInstance) { + assertNotSame(expectedInstance, newInstance); + { + IndicesFollowStats newIndicesFollowStats = newInstance.getIndicesFollowStats(); + IndicesFollowStats expectedIndicesFollowStats = expectedInstance.getIndicesFollowStats(); + assertThat(newIndicesFollowStats.getShardFollowStats().size(), + equalTo(expectedIndicesFollowStats.getShardFollowStats().size())); + assertThat(newIndicesFollowStats.getShardFollowStats().keySet(), + equalTo(expectedIndicesFollowStats.getShardFollowStats().keySet())); + 
for (Map.Entry> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) { + List newStats = indexEntry.getValue(); + List expectedStats = expectedIndicesFollowStats.getShardFollowStats(indexEntry.getKey()); + assertThat(newStats.size(), equalTo(expectedStats.size())); + for (int i = 0; i < newStats.size(); i++) { + ShardFollowStats actualShardFollowStats = newStats.get(i); + ShardFollowStats expectedShardFollowStats = expectedStats.get(i); - @Override - protected void assertInstances(FollowStatsAction.StatsResponses serverTestInstance, FollowStatsResponse clientInstance) { - IndicesFollowStats newIndicesFollowStats = clientInstance.getIndicesFollowStats(); - - // sort by index name, then shard ID - final Map> expectedIndicesFollowStats = new TreeMap<>(); - for (final FollowStatsAction.StatsResponse statsResponse : serverTestInstance.getStatsResponses()) { - expectedIndicesFollowStats.computeIfAbsent( - statsResponse.status().followerIndex(), - k -> new TreeMap<>()).put(statsResponse.status().getShardId(), statsResponse); - } - assertThat(newIndicesFollowStats.getShardFollowStats().size(), - equalTo(expectedIndicesFollowStats.size())); - assertThat(newIndicesFollowStats.getShardFollowStats().keySet(), - equalTo(expectedIndicesFollowStats.keySet())); - for (Map.Entry> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) { - List newStats = indexEntry.getValue(); - Map expectedStats = expectedIndicesFollowStats.get(indexEntry.getKey()); - assertThat(newStats.size(), equalTo(expectedStats.size())); - for (int i = 0; i < newStats.size(); i++) { - ShardFollowStats actualShardFollowStats = newStats.get(i); - ShardFollowNodeTaskStatus expectedShardFollowStats = expectedStats.get(actualShardFollowStats.getShardId()).status(); - - assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster())); - assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.leaderIndex())); - assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.followerIndex())); - assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId())); - assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(), - equalTo(expectedShardFollowStats.leaderGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.leaderMaxSeqNo())); - assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(), - equalTo(expectedShardFollowStats.followerGlobalCheckpoint())); - assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.lastRequestedSeqNo())); - assertThat(actualShardFollowStats.getOutstandingReadRequests(), - equalTo(expectedShardFollowStats.outstandingReadRequests())); - assertThat(actualShardFollowStats.getOutstandingWriteRequests(), - equalTo(expectedShardFollowStats.outstandingWriteRequests())); - assertThat(actualShardFollowStats.getWriteBufferOperationCount(), - equalTo(expectedShardFollowStats.writeBufferOperationCount())); - assertThat(actualShardFollowStats.getFollowerMappingVersion(), - equalTo(expectedShardFollowStats.followerMappingVersion())); - assertThat(actualShardFollowStats.getFollowerSettingsVersion(), - equalTo(expectedShardFollowStats.followerSettingsVersion())); - assertThat(actualShardFollowStats.getTotalReadTimeMillis(), - equalTo(expectedShardFollowStats.totalReadTimeMillis())); - assertThat(actualShardFollowStats.getSuccessfulReadRequests(), - 
equalTo(expectedShardFollowStats.successfulReadRequests())); - assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.failedReadRequests())); - assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.operationsReads())); - assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.bytesRead())); - assertThat(actualShardFollowStats.getTotalWriteTimeMillis(), - equalTo(expectedShardFollowStats.totalWriteTimeMillis())); - assertThat(actualShardFollowStats.getSuccessfulWriteRequests(), - equalTo(expectedShardFollowStats.successfulWriteRequests())); - assertThat(actualShardFollowStats.getFailedWriteRequests(), - equalTo(expectedShardFollowStats.failedWriteRequests())); - assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.operationWritten())); - assertThat(actualShardFollowStats.getReadExceptions().size(), - equalTo(expectedShardFollowStats.readExceptions().size())); - assertThat(actualShardFollowStats.getReadExceptions().keySet(), - equalTo(expectedShardFollowStats.readExceptions().keySet())); - for (final Map.Entry> entry : - actualShardFollowStats.getReadExceptions().entrySet()) { - final Tuple expectedTuple = - expectedShardFollowStats.readExceptions().get(entry.getKey()); - assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1())); - // x-content loses the exception - final ElasticsearchException expected = expectedTuple.v2(); - assertThat(entry.getValue().v2().getMessage(), containsString(expected.getMessage())); - assertNotNull(entry.getValue().v2().getCause()); - assertThat( - entry.getValue().v2().getCause(), - anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class))); - assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage())); + assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster())); + assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.getLeaderIndex())); + assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.getFollowerIndex())); + assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId())); + assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(), + equalTo(expectedShardFollowStats.getLeaderGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.getLeaderMaxSeqNo())); + assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(), + equalTo(expectedShardFollowStats.getFollowerGlobalCheckpoint())); + assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.getLastRequestedSeqNo())); + assertThat(actualShardFollowStats.getOutstandingReadRequests(), + equalTo(expectedShardFollowStats.getOutstandingReadRequests())); + assertThat(actualShardFollowStats.getOutstandingWriteRequests(), + equalTo(expectedShardFollowStats.getOutstandingWriteRequests())); + assertThat(actualShardFollowStats.getWriteBufferOperationCount(), + equalTo(expectedShardFollowStats.getWriteBufferOperationCount())); + assertThat(actualShardFollowStats.getFollowerMappingVersion(), + equalTo(expectedShardFollowStats.getFollowerMappingVersion())); + assertThat(actualShardFollowStats.getFollowerSettingsVersion(), + equalTo(expectedShardFollowStats.getFollowerSettingsVersion())); + assertThat(actualShardFollowStats.getTotalReadTimeMillis(), + 
equalTo(expectedShardFollowStats.getTotalReadTimeMillis())); + assertThat(actualShardFollowStats.getSuccessfulReadRequests(), + equalTo(expectedShardFollowStats.getSuccessfulReadRequests())); + assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.getFailedReadRequests())); + assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.getOperationsReads())); + assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.getBytesRead())); + assertThat(actualShardFollowStats.getTotalWriteTimeMillis(), + equalTo(expectedShardFollowStats.getTotalWriteTimeMillis())); + assertThat(actualShardFollowStats.getSuccessfulWriteRequests(), + equalTo(expectedShardFollowStats.getSuccessfulWriteRequests())); + assertThat(actualShardFollowStats.getFailedWriteRequests(), + equalTo(expectedShardFollowStats.getFailedWriteRequests())); + assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.getOperationWritten())); + assertThat(actualShardFollowStats.getReadExceptions().size(), + equalTo(expectedShardFollowStats.getReadExceptions().size())); + assertThat(actualShardFollowStats.getReadExceptions().keySet(), + equalTo(expectedShardFollowStats.getReadExceptions().keySet())); + for (final Map.Entry> entry : + actualShardFollowStats.getReadExceptions().entrySet()) { + final Tuple expectedTuple = + expectedShardFollowStats.getReadExceptions().get(entry.getKey()); + assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1())); + // x-content loses the exception + final ElasticsearchException expected = expectedTuple.v2(); + assertThat(entry.getValue().v2().getMessage(), containsString(expected.getMessage())); + assertNotNull(entry.getValue().v2().getCause()); + assertThat( + entry.getValue().v2().getCause(), + anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class))); + assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage())); + } + assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(), + equalTo(expectedShardFollowStats.getTimeSinceLastReadMillis())); } - assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(), - equalTo(expectedShardFollowStats.timeSinceLastReadMillis())); } } } + private static void toXContent(FollowStatsResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + { + builder.startArray(IndicesFollowStats.INDICES_FIELD.getPreferredName()); + for (Map.Entry> indexEntry : + response.getIndicesFollowStats().getShardFollowStats().entrySet()) { + builder.startObject(); + { + builder.field(IndicesFollowStats.INDEX_FIELD.getPreferredName(), indexEntry.getKey()); + builder.startArray(IndicesFollowStats.SHARDS_FIELD.getPreferredName()); + { + for (ShardFollowStats stats : indexEntry.getValue()) { + builder.startObject(); + { + builder.field(ShardFollowStats.LEADER_CLUSTER.getPreferredName(), stats.getRemoteCluster()); + builder.field(ShardFollowStats.LEADER_INDEX.getPreferredName(), stats.getLeaderIndex()); + builder.field(ShardFollowStats.FOLLOWER_INDEX.getPreferredName(), stats.getFollowerIndex()); + builder.field(ShardFollowStats.SHARD_ID.getPreferredName(), stats.getShardId()); + builder.field(ShardFollowStats.LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), + stats.getLeaderGlobalCheckpoint()); + builder.field(ShardFollowStats.LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), stats.getLeaderMaxSeqNo()); + 
builder.field(ShardFollowStats.FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), + stats.getFollowerGlobalCheckpoint()); + builder.field(ShardFollowStats.FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), + stats.getFollowerMaxSeqNo()); + builder.field(ShardFollowStats.LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), + stats.getLastRequestedSeqNo()); + builder.field(ShardFollowStats.OUTSTANDING_READ_REQUESTS.getPreferredName(), + stats.getOutstandingReadRequests()); + builder.field(ShardFollowStats.OUTSTANDING_WRITE_REQUESTS.getPreferredName(), + stats.getOutstandingWriteRequests()); + builder.field(ShardFollowStats.WRITE_BUFFER_OPERATION_COUNT_FIELD.getPreferredName(), + stats.getWriteBufferOperationCount()); + builder.humanReadableField( + ShardFollowStats.WRITE_BUFFER_SIZE_IN_BYTES_FIELD.getPreferredName(), + "write_buffer_size", + new ByteSizeValue(stats.getWriteBufferSizeInBytes())); + builder.field(ShardFollowStats.FOLLOWER_MAPPING_VERSION_FIELD.getPreferredName(), + stats.getFollowerMappingVersion()); + builder.field(ShardFollowStats.FOLLOWER_SETTINGS_VERSION_FIELD.getPreferredName(), + stats.getFollowerSettingsVersion()); + builder.humanReadableField( + ShardFollowStats.TOTAL_READ_TIME_MILLIS_FIELD.getPreferredName(), + "total_read_time", + new TimeValue(stats.getTotalReadTimeMillis(), TimeUnit.MILLISECONDS)); + builder.humanReadableField( + ShardFollowStats.TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD.getPreferredName(), + "total_read_remote_exec_time", + new TimeValue(stats.getTotalReadRemoteExecTimeMillis(), TimeUnit.MILLISECONDS)); + builder.field(ShardFollowStats.SUCCESSFUL_READ_REQUESTS_FIELD.getPreferredName(), + stats.getSuccessfulReadRequests()); + builder.field(ShardFollowStats.FAILED_READ_REQUESTS_FIELD.getPreferredName(), + stats.getFailedReadRequests()); + builder.field(ShardFollowStats.OPERATIONS_READ_FIELD.getPreferredName(), stats.getOperationsReads()); + builder.humanReadableField( + ShardFollowStats.BYTES_READ.getPreferredName(), + "total_read", + new ByteSizeValue(stats.getBytesRead(), ByteSizeUnit.BYTES)); + builder.humanReadableField( + ShardFollowStats.TOTAL_WRITE_TIME_MILLIS_FIELD.getPreferredName(), + "total_write_time", + new TimeValue(stats.getTotalWriteTimeMillis(), TimeUnit.MILLISECONDS)); + builder.field(ShardFollowStats.SUCCESSFUL_WRITE_REQUESTS_FIELD.getPreferredName(), + stats.getSuccessfulWriteRequests()); + builder.field(ShardFollowStats.FAILED_WRITE_REQUEST_FIELD.getPreferredName(), + stats.getFailedWriteRequests()); + builder.field(ShardFollowStats.OPERATIONS_WRITTEN.getPreferredName(), stats.getOperationWritten()); + builder.startArray(ShardFollowStats.READ_EXCEPTIONS.getPreferredName()); + { + for (final Map.Entry> entry : + stats.getReadExceptions().entrySet()) { + builder.startObject(); + { + builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), + entry.getKey()); + builder.field(ShardFollowStats.READ_EXCEPTIONS_RETRIES.getPreferredName(), + entry.getValue().v1()); + builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, + entry.getValue().v2()); + } + builder.endObject(); + } + builder.endObject(); + } + } + builder.endArray(); + builder.humanReadableField( + ShardFollowStats.TIME_SINCE_LAST_READ_MILLIS_FIELD.getPreferredName(), + "time_since_last_read", + new TimeValue(stats.getTimeSinceLastReadMillis(), TimeUnit.MILLISECONDS)); + if (stats.getFatalException() != null) { + 
builder.field(ShardFollowStats.FATAL_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, + stats.getFatalException()); + } + builder.endObject(); + } + } + builder.endObject(); + } + } + builder.endArray(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + } + + private static FollowStatsResponse createTestInstance() { + return new FollowStatsResponse(randomIndicesFollowStats()); + } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java index 65ef3aa062d..f6f0f1747e2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java @@ -19,111 +19,99 @@ package org.elasticsearch.client.ccr; -import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; -import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; +import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD; +import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD; +import static org.elasticsearch.client.ccr.PutFollowRequest.REMOTE_CLUSTER_FIELD; +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; -public class GetAutoFollowPatternResponseTests extends AbstractResponseTestCase< - GetAutoFollowPatternAction.Response, - GetAutoFollowPatternResponse> { +public class GetAutoFollowPatternResponseTests extends ESTestCase { - @Override - protected GetAutoFollowPatternAction.Response createServerTestInstance() { + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + this::createTestInstance, + GetAutoFollowPatternResponseTests::toXContent, + GetAutoFollowPatternResponse::fromXContent) + .supportsUnknownFields(true) + .test(); + } + + private GetAutoFollowPatternResponse createTestInstance() { int numPatterns = randomIntBetween(0, 16); - NavigableMap patterns = new TreeMap<>(); + NavigableMap patterns = new TreeMap<>(); for (int i = 0; i < numPatterns; i++) { - String remoteCluster = randomAlphaOfLength(4); - List leaderIndexPatters = Collections.singletonList(randomAlphaOfLength(4)); - String followIndexNamePattern = randomAlphaOfLength(4); - - Integer maxOutstandingReadRequests = null; + GetAutoFollowPatternResponse.Pattern pattern = new GetAutoFollowPatternResponse.Pattern( + randomAlphaOfLength(4), Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4)); if (randomBoolean()) { - maxOutstandingReadRequests = randomIntBetween(0, Integer.MAX_VALUE); + 
pattern.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); } - Integer maxOutstandingWriteRequests = null; if (randomBoolean()) { - maxOutstandingWriteRequests = randomIntBetween(0, Integer.MAX_VALUE); + pattern.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE)); } - Integer maxReadRequestOperationCount = null; if (randomBoolean()) { - maxReadRequestOperationCount = randomIntBetween(0, Integer.MAX_VALUE); + pattern.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); } - ByteSizeValue maxReadRequestSize = null; if (randomBoolean()) { - maxReadRequestSize = new ByteSizeValue(randomNonNegativeLong()); + pattern.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong())); } - Integer maxWriteBufferCount = null; if (randomBoolean()) { - maxWriteBufferCount = randomIntBetween(0, Integer.MAX_VALUE); + pattern.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); } - ByteSizeValue maxWriteBufferSize = null; if (randomBoolean()) { - maxWriteBufferSize = new ByteSizeValue(randomNonNegativeLong()); + pattern.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); } - Integer maxWriteRequestOperationCount = null; if (randomBoolean()) { - maxWriteRequestOperationCount = randomIntBetween(0, Integer.MAX_VALUE); + pattern.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); } - ByteSizeValue maxWriteRequestSize = null; if (randomBoolean()) { - maxWriteRequestSize = new ByteSizeValue(randomNonNegativeLong()); + pattern.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); } - TimeValue maxRetryDelay = null; if (randomBoolean()) { - maxRetryDelay = new TimeValue(randomNonNegativeLong()); + pattern.setMaxRetryDelay(new TimeValue(randomNonNegativeLong())); } - TimeValue readPollTimeout = null; if (randomBoolean()) { - readPollTimeout = new TimeValue(randomNonNegativeLong()); + pattern.setReadPollTimeout(new TimeValue(randomNonNegativeLong())); } - patterns.put(randomAlphaOfLength(4), new AutoFollowMetadata.AutoFollowPattern(remoteCluster, leaderIndexPatters, - followIndexNamePattern, maxReadRequestOperationCount, maxWriteRequestOperationCount, maxOutstandingReadRequests, - maxOutstandingWriteRequests, maxReadRequestSize, maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize, - maxRetryDelay, readPollTimeout)); + patterns.put(randomAlphaOfLength(4), pattern); } - return new GetAutoFollowPatternAction.Response(patterns); + return new GetAutoFollowPatternResponse(patterns); } - @Override - protected GetAutoFollowPatternResponse doParseToClientInstance(XContentParser parser) throws IOException { - return GetAutoFollowPatternResponse.fromXContent(parser); - } - - @Override - protected void assertInstances(GetAutoFollowPatternAction.Response serverTestInstance, GetAutoFollowPatternResponse clientInstance) { - assertThat(serverTestInstance.getAutoFollowPatterns().size(), equalTo(clientInstance.getPatterns().size())); - for (Map.Entry entry : serverTestInstance.getAutoFollowPatterns().entrySet()) { - AutoFollowMetadata.AutoFollowPattern serverPattern = entry.getValue(); - GetAutoFollowPatternResponse.Pattern clientPattern = clientInstance.getPatterns().get(entry.getKey()); - assertThat(clientPattern, notNullValue()); - - assertThat(serverPattern.getRemoteCluster(), equalTo(clientPattern.getRemoteCluster())); - assertThat(serverPattern.getLeaderIndexPatterns(), equalTo(clientPattern.getLeaderIndexPatterns())); - assertThat(serverPattern.getFollowIndexPattern(), 
equalTo(clientPattern.getFollowIndexNamePattern())); - assertThat(serverPattern.getMaxOutstandingReadRequests(), equalTo(clientPattern.getMaxOutstandingReadRequests())); - assertThat(serverPattern.getMaxOutstandingWriteRequests(), equalTo(clientPattern.getMaxOutstandingWriteRequests())); - assertThat(serverPattern.getMaxReadRequestOperationCount(), equalTo(clientPattern.getMaxReadRequestOperationCount())); - assertThat(serverPattern.getMaxWriteRequestOperationCount(), equalTo(clientPattern.getMaxWriteRequestOperationCount())); - assertThat(serverPattern.getMaxReadRequestSize(), equalTo(clientPattern.getMaxReadRequestSize())); - assertThat(serverPattern.getMaxWriteRequestSize(), equalTo(clientPattern.getMaxWriteRequestSize())); - assertThat(serverPattern.getMaxWriteBufferCount(), equalTo(clientPattern.getMaxWriteBufferCount())); - assertThat(serverPattern.getMaxWriteBufferSize(), equalTo(clientPattern.getMaxWriteBufferSize())); - assertThat(serverPattern.getMaxRetryDelay(), equalTo(clientPattern.getMaxRetryDelay())); - assertThat(serverPattern.getReadPollTimeout(), equalTo(clientPattern.getReadPollTimeout())); + public static void toXContent(GetAutoFollowPatternResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + { + builder.startArray(GetAutoFollowPatternResponse.PATTERNS_FIELD.getPreferredName()); + for (Map.Entry entry : response.getPatterns().entrySet()) { + builder.startObject(); + { + builder.field(GetAutoFollowPatternResponse.NAME_FIELD.getPreferredName(), entry.getKey()); + builder.startObject(GetAutoFollowPatternResponse.PATTERN_FIELD.getPreferredName()); + { + GetAutoFollowPatternResponse.Pattern pattern = entry.getValue(); + builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), pattern.getRemoteCluster()); + builder.field(LEADER_PATTERNS_FIELD.getPreferredName(), pattern.getLeaderIndexPatterns()); + if (pattern.getFollowIndexNamePattern()!= null) { + builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), pattern.getFollowIndexNamePattern()); + } + entry.getValue().toXContentFragment(builder, ToXContent.EMPTY_PARAMS); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); } + builder.endObject(); } - } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java index 52fe70b3a39..00bcf535f08 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/PutFollowResponseTests.java @@ -19,30 +19,35 @@ package org.elasticsearch.client.ccr; -import org.elasticsearch.client.AbstractResponseTestCase; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import static org.hamcrest.Matchers.is; +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; -public class PutFollowResponseTests extends AbstractResponseTestCase { +public class PutFollowResponseTests extends ESTestCase { - @Override - protected PutFollowAction.Response createServerTestInstance() { - return new PutFollowAction.Response(randomBoolean(), randomBoolean(), randomBoolean()); + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + 
this::createTestInstance, + PutFollowResponseTests::toXContent, + PutFollowResponse::fromXContent) + .supportsUnknownFields(true) + .test(); } - @Override - protected PutFollowResponse doParseToClientInstance(XContentParser parser) throws IOException { - return PutFollowResponse.fromXContent(parser); + private PutFollowResponse createTestInstance() { + return new PutFollowResponse(randomBoolean(), randomBoolean(), randomBoolean()); } - @Override - protected void assertInstances(PutFollowAction.Response serverTestInstance, PutFollowResponse clientInstance) { - assertThat(serverTestInstance.isFollowIndexCreated(), is(clientInstance.isFollowIndexCreated())); - assertThat(serverTestInstance.isFollowIndexShardsAcked(), is(clientInstance.isFollowIndexShardsAcked())); - assertThat(serverTestInstance.isIndexFollowingStarted(), is(clientInstance.isIndexFollowingStarted())); + public static void toXContent(PutFollowResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + { + builder.field(PutFollowResponse.FOLLOW_INDEX_CREATED.getPreferredName(), response.isFollowIndexCreated()); + builder.field(PutFollowResponse.FOLLOW_INDEX_SHARDS_ACKED.getPreferredName(), response.isFollowIndexShardsAcked()); + builder.field(PutFollowResponse.INDEX_FOLLOWING_STARTED.getPreferredName(), response.isIndexFollowingStarted()); + } + builder.endObject(); } } From c9ff630a12ab964e6dcb8c0b85e4167d14dcf324 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Mon, 8 Apr 2019 13:10:53 +0300 Subject: [PATCH 26/45] Mute reindex integTest in FIPS (#40941) Relates: #40904 --- modules/reindex/build.gradle | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 31fbc3ca1f6..ca2b6e474a6 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -95,6 +95,11 @@ dependencies { es090 'org.elasticsearch:elasticsearch:0.90.13@zip' } +// Issue tracked in https://github.com/elastic/elasticsearch/issues/40904 +if (project.inFipsJvm) { + integTest.enabled = false +} + if (Os.isFamily(Os.FAMILY_WINDOWS)) { logger.warn("Disabling reindex-from-old tests because we can't get the pid file on windows") integTest.runner { From fb5d7cf237bdccfba6a00f6b3128772bdd3f4134 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 8 Apr 2019 11:06:24 +0100 Subject: [PATCH 27/45] Mute AsyncTwoPhaseIndexerTests.testStateMachine --- .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 9e79912f851..39a8807c2d5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -206,6 +206,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { } } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/40946") public void testStateMachine() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); From bc0fe7d64d0de8544600b669fcbbe6afd16513bc Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 8 Apr 2019 11:22:38 +0200 Subject: [PATCH 28/45] Handle min_doc_freq in phrase suggester (#40840) The 
phrase suggesters have an option to remove terms that have a frequency lower than a provided min_doc_freq. However this value is overwritten by the frequency of the original term in the popular mode. This change ensures that we keep the maximum value between the provided min_doc_value and the original term frequency as a threshold to select candidates. Fixes #16764 --- .../phrase/DirectCandidateGenerator.java | 54 +++++++++++-------- .../search/suggest/SuggestSearchIT.java | 46 ++++++++++++++++ 2 files changed, 78 insertions(+), 22 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java index 9d9721ab046..381812104ba 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -132,31 +132,41 @@ public final class DirectCandidateGenerator extends CandidateGenerator { public CandidateSet drawCandidates(CandidateSet set) throws IOException { Candidate original = set.originalTerm; BytesRef term = preFilter(original.term, spare, byteSpare); - if (suggestMode != SuggestMode.SUGGEST_ALWAYS) { - /** - * We use the {@link TermStats#docFreq} to compute the frequency threshold - * because that's what {@link DirectSpellChecker#suggestSimilar} expects - * when filtering terms. - */ - int threshold = thresholdTermFrequency(original.termStats.docFreq); - if (threshold == Integer.MAX_VALUE) { - // the threshold is the max possible frequency so we can skip the search - return set; + float origThreshold = spellchecker.getThresholdFrequency(); + try { + if (suggestMode != SuggestMode.SUGGEST_ALWAYS) { + /** + * We use the {@link TermStats#docFreq} to compute the frequency threshold + * because that's what {@link DirectSpellChecker#suggestSimilar} expects + * when filtering terms. + */ + int threshold = thresholdTermFrequency(original.termStats.docFreq); + if (threshold == Integer.MAX_VALUE) { + // the threshold is the max possible frequency so we can skip the search + return set; + } + // don't override the threshold if the provided min_doc_freq is greater + // than the original term frequency. 
+ if (spellchecker.getThresholdFrequency() < threshold) { + spellchecker.setThresholdFrequency(threshold); + } } - spellchecker.setThresholdFrequency(threshold); - } - SuggestWord[] suggestSimilar = spellchecker.suggestSimilar(new Term(field, term), numCandidates, reader, this.suggestMode); - List candidates = new ArrayList<>(suggestSimilar.length); - for (int i = 0; i < suggestSimilar.length; i++) { - SuggestWord suggestWord = suggestSimilar[i]; - BytesRef candidate = new BytesRef(suggestWord.string); - TermStats termStats = internalTermStats(candidate); - postFilter(new Candidate(candidate, termStats, - suggestWord.score, score(termStats, suggestWord.score, sumTotalTermFreq), false), spare, byteSpare, candidates); + SuggestWord[] suggestSimilar = spellchecker.suggestSimilar(new Term(field, term), numCandidates, reader, this.suggestMode); + List candidates = new ArrayList<>(suggestSimilar.length); + for (int i = 0; i < suggestSimilar.length; i++) { + SuggestWord suggestWord = suggestSimilar[i]; + BytesRef candidate = new BytesRef(suggestWord.string); + TermStats termStats = internalTermStats(candidate); + postFilter(new Candidate(candidate, termStats, + suggestWord.score, score(termStats, suggestWord.score, sumTotalTermFreq), false), spare, byteSpare, candidates); + } + set.addCandidates(candidates); + return set; + } finally { + // restore the original value back + spellchecker.setThresholdFrequency(origThreshold); } - set.addCandidates(candidates); - return set; } protected BytesRef preFilter(final BytesRef term, final CharsRefBuilder spare, final BytesRefBuilder byteSpare) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java index d8c2cce0df1..1813f17141b 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java @@ -1005,6 +1005,52 @@ public class SuggestSearchIT extends ESIntegTestCase { assertSuggestion(searchSuggest, 0, "suggestion", "apple"); } + public void testPhraseSuggestMinDocFreq() throws Exception { + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("text") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject(); + assertAcked(prepareCreate("test") + .setSettings(Settings.builder().put("index.number_of_shards", 1).build()) + .addMapping("type", mapping)); + + List builders = new ArrayList<>(); + builders.add(client().prepareIndex("test", "type").setSource("text", "apple")); + builders.add(client().prepareIndex("test", "type").setSource("text", "apple")); + builders.add(client().prepareIndex("test", "type").setSource("text", "apple")); + builders.add(client().prepareIndex("test", "type").setSource("text", "appfle")); + indexRandom(true, false, builders); + + PhraseSuggestionBuilder phraseSuggest = phraseSuggestion("text").text("appple") + .size(2) + .addCandidateGenerator(new DirectCandidateGeneratorBuilder("text") + .suggestMode("popular")); + + Suggest searchSuggest = searchSuggest("suggestion", phraseSuggest); + assertSuggestion(searchSuggest, 0, "suggestion", 2, "apple", "appfle"); + + phraseSuggest = phraseSuggestion("text").text("appple") + .addCandidateGenerator(new DirectCandidateGeneratorBuilder("text") + .suggestMode("popular") + .minDocFreq(2)); + + searchSuggest = searchSuggest("suggestion", 
phraseSuggest);
+        assertSuggestion(searchSuggest, 0, "suggestion", 1, "apple");
+
+        phraseSuggest = phraseSuggestion("text").text("appple")
+            .addCandidateGenerator(new DirectCandidateGeneratorBuilder("text")
+                .suggestMode("popular")
+                .minDocFreq(2));
+        searchSuggest = searchSuggest("suggestion", phraseSuggest);
+        assertSuggestion(searchSuggest, 0, "suggestion", 1, "apple");
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(DummyTemplatePlugin.class);

From 3e078b2026f0736ef7595786008cee33fd9c0967 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Mon, 8 Apr 2019 08:15:05 -0400
Subject: [PATCH 29/45] Use bundled Java for all eligible versions in tests (#40928)

This commit sets the version to ensure that we use the bundled Java when
running integration tests for all eligible versions. In particular, since we
started bundling Java with 7.0.0, this commit sets said version to 7.0.0.

---
 .../elasticsearch/gradle/test/ClusterFormationTasks.groovy | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
index f7d3ea4c6ce..05333e47740 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
@@ -687,8 +687,7 @@ class ClusterFormationTasks {
     static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) {
         return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { Exec exec ->
             exec.workingDir node.cwd
-            // TODO: this must change to 7.0.0 after bundling java has been backported
-            if (project.isRuntimeJavaHomeSet || node.nodeVersion.before(Version.fromString("8.0.0")) ||
+            if (project.isRuntimeJavaHomeSet || node.nodeVersion.before(Version.fromString("7.0.0")) ||
                     node.config.distribution == 'integ-test-zip') {
                 exec.environment.put('JAVA_HOME', project.runtimeJavaHome)
             } else {
@@ -714,7 +713,7 @@ class ClusterFormationTasks {
             ant.exec(executable: node.executable, spawn: node.config.daemonize, newenvironment: true,
                      dir: node.cwd, taskname: 'elasticsearch') {
                 node.env.each { key, value -> env(key: key, value: value) }
-                if (project.isRuntimeJavaHomeSet || node.nodeVersion.before(Version.fromString("8.0.0")) ||
+                if (project.isRuntimeJavaHomeSet || node.nodeVersion.before(Version.fromString("7.0.0")) ||
                     node.config.distribution == 'integ-test-zip') {
                     env(key: 'JAVA_HOME', value: project.runtimeJavaHome)
                 }

From af874635baf2db891c24b1fba790708937f2bc01 Mon Sep 17 00:00:00 2001
From: Costin Leau
Date: Mon, 8 Apr 2019 16:45:25 +0300
Subject: [PATCH 30/45] SQL: Prefer resultSets over exceptions in metadata (#40641)

Changed the JDBC metadata to return empty result sets instead of throwing
SQLFeatureNotSupported, as it seems a safer and more compatible approach for
consumers.
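A minimal sketch of the consumer-side effect (an illustration with an assumed
connection URL and index name, not code from this patch): metadata calls such
as getPrimaryKeys now come back as empty result sets, so callers can iterate
them unconditionally rather than catching SQLFeatureNotSupportedException.

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    public class MetaDataProbe {
        public static void main(String[] args) throws SQLException {
            // The jdbc:es:// URL scheme comes from the ES JDBC driver; host and index are assumed.
            try (Connection con = DriverManager.getConnection("jdbc:es://localhost:9200/")) {
                DatabaseMetaData md = con.getMetaData();
                // Returns an empty result set (instead of throwing), so plain iteration is safe.
                try (ResultSet rs = md.getPrimaryKeys(null, null, "my_index")) {
                    while (rs.next()) {
                        System.out.println(rs.getString("COLUMN_NAME"));
                    }
                }
            }
        }
    }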
Fix #40533 (cherry picked from commit ef2d2527c2b5140556fd477e7ff6ea36966684da) --- .../xpack/sql/jdbc/JdbcConfiguration.java | 7 +- .../xpack/sql/jdbc/JdbcConnection.java | 7 +- .../xpack/sql/jdbc/JdbcDatabaseMetaData.java | 167 +++++++++++++++--- .../xpack/sql/jdbc/JdbcHttpClient.java | 15 +- .../xpack/sql/jdbc/JdbcResultSet.java | 3 +- .../sql/jdbc/JdbcDatabaseMetaDataTests.java | 108 ++++++++++- .../sql/client/ConnectionConfiguration.java | 8 +- 7 files changed, 273 insertions(+), 42 deletions(-) diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java index 36df3488ff8..7a9154c10ac 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java @@ -162,7 +162,7 @@ class JdbcConfiguration extends ConnectionConfiguration { } @Override - protected Collection extraOptions() { + protected Collection extraOptions() { return OPTION_NAMES; } @@ -192,9 +192,8 @@ class JdbcConfiguration extends ConnectionConfiguration { public DriverPropertyInfo[] driverPropertyInfo() { List info = new ArrayList<>(); - for (String option : OPTION_NAMES) { - String value = null; - DriverPropertyInfo prop = new DriverPropertyInfo(option, value); + for (String option : optionNames()) { + DriverPropertyInfo prop = new DriverPropertyInfo(option, null); info.add(prop); } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConnection.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConnection.java index c682c5ac05c..09096fbe405 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConnection.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConnection.java @@ -45,9 +45,12 @@ class JdbcConnection implements Connection, JdbcWrapper { * If we remove it, we need to make sure no other types of Exceptions (runtime or otherwise) are thrown */ JdbcConnection(JdbcConfiguration connectionInfo) throws SQLException { - cfg = connectionInfo; - client = new JdbcHttpClient(connectionInfo); + this(connectionInfo, true); + } + JdbcConnection(JdbcConfiguration connectionInfo, boolean checkServer) throws SQLException { + cfg = connectionInfo; + client = new JdbcHttpClient(connectionInfo, checkServer); url = connectionInfo.connectionString(); userName = connectionInfo.authUser(); } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java index e69d5b02013..eaececff16d 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java @@ -10,15 +10,17 @@ import org.elasticsearch.xpack.sql.client.Version; import java.sql.Connection; import java.sql.DatabaseMetaData; +import java.sql.DriverPropertyInfo; import java.sql.JDBCType; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.RowIdLifetime; import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; import java.util.ArrayList; import java.util.List; +import static java.sql.JDBCType.BIGINT; +import static java.sql.JDBCType.BOOLEAN; import 
static java.sql.JDBCType.INTEGER; import static java.sql.JDBCType.SMALLINT; import static org.elasticsearch.xpack.sql.client.StringUtils.EMPTY; @@ -209,7 +211,7 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public String getSystemFunctions() throws SQLException { // https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/system-functions?view=sql-server-2017 - return "DATABASE, IFNULL, USER"; + return "DATABASE,IFNULL,USER"; } @Override @@ -663,8 +665,7 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { // https://www.postgresql.org/docs/9.0/static/infoschema-routines.html @Override public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { - return emptySet(con.cfg, - "ROUTINES", + return emptySet(con.cfg, "ROUTINES", "PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", @@ -679,8 +680,7 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException { - return emptySet(con.cfg, - "PARAMETERS", + return emptySet(con.cfg, "ROUTINES_COLUMNS", "PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", @@ -774,14 +774,14 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { public ResultSet getCatalogs() throws SQLException { // TABLE_CAT is the first column Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '%' LIKE ''", 1); - return memorySet(con.cfg, columnInfo("", "TABLE_CAT"), data); + return memorySet(con.cfg, columnInfo("CATALOGS", "TABLE_CAT"), data); } @Override public ResultSet getTableTypes() throws SQLException { // TABLE_TYPE (4) Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", 4); - return memorySet(con.cfg, columnInfo("", "TABLE_TYPE"), data); + return memorySet(con.cfg, columnInfo("TABLE_TYPES", "TABLE_TYPE"), data); } @Override @@ -797,43 +797,128 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { - throw new SQLFeatureNotSupportedException("Privileges not supported"); + return emptySet(con.cfg, "", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "COLUMN_NAME", + "GRANTOR", + "GRANTEE", + "PRIVILEGE", + "IS_GRANTABLE"); } @Override public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { - throw new SQLFeatureNotSupportedException("Privileges not supported"); + return emptySet(con.cfg, "", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "GRANTOR", + "GRANTEE", + "PRIVILEGE", + "IS_GRANTABLE"); } @Override public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { - throw new SQLFeatureNotSupportedException("Row identifiers not supported"); + return emptySet(con.cfg, "", + "SCOPE", SMALLINT, + "COLUMN_NAME", + "DATA_TYPE", INTEGER, + "TYPE_NAME", + "COLUMN_SIZE", INTEGER, + "BUFFER_LENGTH", INTEGER, + "DECIMAL_DIGITS", SMALLINT, + "PSEUDO_COLUMN", SMALLINT); } @Override public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { - throw new SQLFeatureNotSupportedException("Version column not supported yet"); + return emptySet(con.cfg, "", + "SCOPE", SMALLINT, + "COLUMN_NAME", + "DATA_TYPE", 
INTEGER, + "TYPE_NAME", + "COLUMN_SIZE", INTEGER, + "BUFFER_LENGTH", INTEGER, + "DECIMAL_DIGITS", SMALLINT, + "PSEUDO_COLUMN", SMALLINT); } @Override public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { - throw new SQLFeatureNotSupportedException("Primary keys not supported"); + return emptySet(con.cfg, "", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "COLUMN_NAME", + "KEY_SEQ", SMALLINT, + "PK_NAME"); } @Override public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { - throw new SQLFeatureNotSupportedException("Imported keys not supported"); + return emptySet(con.cfg, "", + "PKTABLE_CAT", + "PKTABLE_SCHEM", + "PKTABLE_NAME", + "PKCOLUMN_NAME", + "FKTABLE_CAT", + "FKTABLE_SCHEM", + "FKTABLE_NAME", + "FKCOLUMN_NAME", + "KEY_SEQ", SMALLINT, + "UPDATE_RULE ", SMALLINT, + "DELETE_RULE ", SMALLINT, + "FK_NAME", + "PK_NAME ", + "DEFERRABILITY", SMALLINT, + "IS_NULLABLE" + ); } @Override public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { - throw new SQLFeatureNotSupportedException("Exported keys not supported"); + return emptySet(con.cfg, "", + "PKTABLE_CAT", + "PKTABLE_SCHEM", + "PKTABLE_NAME", + "PKCOLUMN_NAME", + "FKTABLE_CAT", + "FKTABLE_SCHEM", + "FKTABLE_NAME", + "FKCOLUMN_NAME", + "KEY_SEQ", SMALLINT, + "UPDATE_RULE ", SMALLINT, + "DELETE_RULE ", SMALLINT, + "FK_NAME", + "PK_NAME ", + "DEFERRABILITY", SMALLINT, + "IS_NULLABLE" + ); } @Override public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { - throw new SQLFeatureNotSupportedException("Cross reference not supported"); + return emptySet(con.cfg, "", + "PKTABLE_CAT", + "PKTABLE_SCHEM", + "PKTABLE_NAME", + "PKCOLUMN_NAME", + "FKTABLE_CAT", + "FKTABLE_SCHEM", + "FKTABLE_NAME", + "FKCOLUMN_NAME", + "KEY_SEQ", SMALLINT, + "UPDATE_RULE ", SMALLINT, + "DELETE_RULE ", SMALLINT, + "FK_NAME", + "PK_NAME ", + "DEFERRABILITY", SMALLINT, + "IS_NULLABLE" + ); } @Override @@ -843,7 +928,22 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { - throw new SQLFeatureNotSupportedException("Indicies not supported"); + return emptySet(con.cfg, "", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "NON_UNIQUE", BOOLEAN, + "INDEX_QUALIFIER", + "INDEX_NAME", + "TYPE", SMALLINT, + "ORDINAL_POSITION", SMALLINT, + "COLUMN_NAME", + "ASC_OR_DESC", + "CARDINALITY", BIGINT, + "PAGES", BIGINT, + "FILTER_CONDITION", + "TYPE_NAME" + ); } @Override @@ -908,7 +1008,7 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "USER_DEFINED_TYPES", "TYPE_CAT", "TYPE_SCHEM", @@ -946,7 +1046,7 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "SUPER_TYPES", "TYPE_CAT", "TYPE_SCHEM", @@ -959,7 +1059,7 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public ResultSet getSuperTables(String catalog, String 
schemaPattern, String tableNamePattern) throws SQLException { - return emptySet(con.cfg, "SUPER_TABLES", + return emptySet(con.cfg, "", "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", @@ -969,7 +1069,7 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "ATTRIBUTES", "TYPE_CAT", "TYPE_SCHEM", @@ -1056,12 +1156,27 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public ResultSet getClientInfoProperties() throws SQLException { - throw new SQLException("Client info not implemented yet"); + DriverPropertyInfo[] info = con.cfg.driverPropertyInfo(); + Object[][] data = new Object[info.length][]; + + for (int i = 0; i < data.length; i++) { + data[i] = new Object[4]; + data[i][0] = info[i].name; + data[i][1] = Integer.valueOf(-1); + data[i][2] = EMPTY; + data[i][3] = EMPTY; + } + + return memorySet(con.cfg, columnInfo("", + "NAME", + "MAX_LEN", INTEGER, + "DEFAULT_VALUE", + "DESCRIPTION"), data); } @Override public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "FUNCTIONS", "FUNCTION_CAT", "FUNCTION_SCHEM", @@ -1074,7 +1189,7 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "FUNCTION_COLUMNS", "FUNCTION_CAT", "FUNCTION_SCHEM", @@ -1097,7 +1212,7 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "PSEUDO_COLUMNS", "TABLE_CAT", "TABLE_SCHEM", @@ -1212,7 +1327,7 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { @Override public int batchSize() { - return data.length; + return ObjectUtils.isEmpty(data) ? 0 : data.length; } @Override diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java index b059b83970d..8bf3811ecb7 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java @@ -30,17 +30,23 @@ import static org.elasticsearch.xpack.sql.client.StringUtils.EMPTY; class JdbcHttpClient { private final HttpClient httpClient; private final JdbcConfiguration conCfg; - private final InfoResponse serverInfo; + private InfoResponse serverInfo; /** * The SQLException is the only type of Exception the JDBC API can throw (and that the user expects). 
* If we remove it, we need to make sure no other types of Exceptions (runtime or otherwise) are thrown */ JdbcHttpClient(JdbcConfiguration conCfg) throws SQLException { + this(conCfg, true); + } + + JdbcHttpClient(JdbcConfiguration conCfg, boolean checkServer) throws SQLException { httpClient = new HttpClient(conCfg); this.conCfg = conCfg; - this.serverInfo = fetchServerInfo(); - checkServerVersion(); + if (checkServer) { + this.serverInfo = fetchServerInfo(); + checkServerVersion(); + } } boolean ping(long timeoutInMs) throws SQLException { @@ -78,6 +84,9 @@ class JdbcHttpClient { } InfoResponse serverInfo() throws SQLException { + if (serverInfo == null) { + serverInfo = fetchServerInfo(); + } return serverInfo; } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java index 1d2489fc6d5..9661099c55b 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java @@ -1224,6 +1224,7 @@ class JdbcResultSet implements ResultSet, JdbcWrapper { @Override public String toString() { - return format(Locale.ROOT, "%s:row %d", getClass().getSimpleName(), rowNumber); + return format(Locale.ROOT, "%s:row %d:cursor size %d:%s", getClass().getSimpleName(), rowNumber, cursor.batchSize(), + cursor.columns()); } } diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java index e24deaced9d..065807117ea 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java @@ -6,15 +6,119 @@ package org.elasticsearch.xpack.sql.jdbc; +import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.test.ESTestCase; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Properties; + public class JdbcDatabaseMetaDataTests extends ESTestCase { - private JdbcDatabaseMetaData md = new JdbcDatabaseMetaData(null); + private JdbcDatabaseMetaData md = null; + + { + try { + md = new JdbcDatabaseMetaData( + new JdbcConnection(JdbcConfiguration.create("jdbc:es://localhost:9200/", new Properties(), 10), false)); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } public void testSeparators() throws Exception { assertEquals(":", md.getCatalogSeparator()); assertEquals("\"", md.getIdentifierQuoteString()); assertEquals("\\", md.getSearchStringEscape()); } -} + + public void testGetProcedures() throws Exception { + testEmptySet(() -> md.getProcedures(null, null, null)); + } + + public void testGetProcedureColumns() throws Exception { + testEmptySet(() -> md.getProcedureColumns(null, null, null, null)); + } + + public void testGetColumnPrivileges() throws Exception { + testEmptySet(() -> md.getColumnPrivileges(null, null, null, null)); + } + + public void testGetTablePrivileges() throws Exception { + testEmptySet(() -> md.getTablePrivileges(null, null, null)); + } + + public void testGetBestRowIdentifier() throws Exception { + testEmptySet(() -> md.getBestRowIdentifier(null, null, null, 0, false)); + } + + public void testGetVersionColumns() throws Exception { + testEmptySet(() -> md.getVersionColumns(null, 
null, null)); + } + + public void testGetPrimaryKeys() throws Exception { + testEmptySet(() -> md.getPrimaryKeys(null, null, null)); + } + + public void testGetImportedKeys() throws Exception { + testEmptySet(() -> md.getImportedKeys(null, null, null)); + } + + public void testGetExportedKeys() throws Exception { + testEmptySet(() -> md.getExportedKeys(null, null, null)); + } + + public void testGetCrossReference() throws Exception { + testEmptySet(() -> md.getCrossReference(null, null, null, null, null, null)); + } + + public void testGetIndexInfo() throws Exception { + testEmptySet(() -> md.getIndexInfo(null, null, null, false, false)); + } + + public void testGetUDTs() throws Exception { + testEmptySet(() -> md.getUDTs(null, null, null, null)); + } + + public void testGetSuperTypes() throws Exception { + testEmptySet(() -> md.getSuperTypes(null, null, null)); + } + + public void testGetSuperTables() throws Exception { + testEmptySet(() -> md.getSuperTables(null, null, null)); + } + + public void testGetAttributes() throws Exception { + testEmptySet(() -> md.getAttributes(null, null, null, null)); + } + + public void testGetFunctions() throws Exception { + testEmptySet(() -> md.getFunctions(null, null, null)); + } + + public void testGetFunctionColumns() throws Exception { + testEmptySet(() -> md.getFunctionColumns(null, null, null, null)); + } + + public void testGetPseudoColumns() throws Exception { + testEmptySet(() -> md.getPseudoColumns(null, null, null, null)); + } + + private static void testEmptySet(CheckedSupplier supplier) throws SQLException { + try (ResultSet result = supplier.get()) { + assertNotNull(result); + assertFalse(result.next()); + } + } + + public void testGetClientInfoProperties() throws Exception { + try (ResultSet result = md.getClientInfoProperties()) { + assertNotNull(result); + assertTrue(result.next()); + assertNotNull(result.getString(1)); + assertEquals(-1, result.getInt(2)); + assertEquals("", result.getString(3)); + assertEquals("", result.getString(4)); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java index c3c89906c23..591762b18a9 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java @@ -7,13 +7,13 @@ package org.elasticsearch.xpack.sql.client; import java.net.URI; import java.net.URISyntaxException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Enumeration; import java.util.LinkedHashSet; import java.util.Properties; import java.util.Set; +import java.util.TreeSet; import java.util.concurrent.TimeUnit; import java.util.function.Function; @@ -148,13 +148,13 @@ public class ConnectionConfiguration { } } - private Collection optionNames() { - Collection options = new ArrayList<>(OPTION_NAMES); + protected Collection optionNames() { + Set options = new TreeSet<>(OPTION_NAMES); options.addAll(extraOptions()); return options; } - protected Collection extraOptions() { + protected Collection extraOptions() { return emptyList(); } From f78e6ef73b4ac47f223ee471336a011d3deced0a Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 8 Apr 2019 14:57:42 +0100 Subject: [PATCH 31/45] Short-circuit rebalancing when disabled 
(#40942) Today if `cluster.routing.rebalance.enable: none` then rebalancing is disabled, but we still execute `balanceByWeights()` and perform some rather expensive calculations before discovering that we cannot rebalance any shards. In a large cluster this can make cluster state updates occur rather slowly. With this change we check earlier whether rebalancing is globally disabled and, if so, avoid the rebalancing process entirely. --- .../decider/EnableAllocationDecider.java | 30 ++- .../EnableAllocationShortCircuitTests.java | 233 ++++++++++++++++++ 2 files changed, 260 insertions(+), 3 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 8a72fe8cb49..c73a630bb66 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -86,16 +86,21 @@ public class EnableAllocationDecider extends AllocationDecider { clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); } - public void setEnableRebalance(Rebalance enableRebalance) { + private void setEnableRebalance(Rebalance enableRebalance) { this.enableRebalance = enableRebalance; } - public void setEnableAllocation(Allocation enableAllocation) { + private void setEnableAllocation(Allocation enableAllocation) { this.enableAllocation = enableAllocation; } @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return canAllocate(shardRouting, allocation); + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { if (allocation.ignoreDisable()) { return allocation.decision(Decision.YES, NAME, "explicitly ignoring any disabling of allocation due to manual allocation commands via the reroute API"); @@ -136,10 +141,29 @@ public class EnableAllocationDecider extends AllocationDecider { } } + @Override + public Decision canRebalance(RoutingAllocation allocation) { + if (allocation.ignoreDisable()) { + return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of rebalancing"); + } + + if (enableRebalance == Rebalance.NONE) { + for (IndexMetaData indexMetaData : allocation.metaData()) { + if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexMetaData.getSettings()) + && INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexMetaData.getSettings()) != Rebalance.NONE) { + return allocation.decision(Decision.YES, NAME, "rebalancing is permitted on one or more indices"); + } + } + return allocation.decision(Decision.NO, NAME, "no rebalancing is allowed due to %s", setting(enableRebalance, false)); + } + + return allocation.decision(Decision.YES, NAME, "rebalancing is not globally disabled"); + } + @Override public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { if (allocation.ignoreDisable()) { - return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of relocation"); + return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of rebalancing"); } Settings indexSettings = 
allocation.metaData().getIndexSafe(shardRouting.index()).getSettings(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java new file mode 100644 index 00000000000..ebe6f8f0220 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java @@ -0,0 +1,233 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.routing.allocation.decider; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.test.gateway.TestGatewayAllocator; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class EnableAllocationShortCircuitTests extends ESAllocationTestCase { + + private static ClusterState createClusterStateWithAllShardsAssigned() { + AllocationService allocationService = createAllocationService(Settings.EMPTY); + + final int numberOfNodes = randomIntBetween(1, 5); + final DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(); + for (int i = 0; i < numberOfNodes; i++) 
{ + discoveryNodesBuilder.add(newNode("node" + i)); + } + + final MetaData.Builder metadataBuilder = MetaData.builder(); + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + for (int i = randomIntBetween(1, 10); i >= 0; i--) { + final IndexMetaData indexMetaData = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(randomIntBetween(0, numberOfNodes - 1)).build(); + metadataBuilder.put(indexMetaData, true); + routingTableBuilder.addAsNew(indexMetaData); + } + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(Settings.EMPTY)) + .nodes(discoveryNodesBuilder).metaData(metadataBuilder).routingTable(routingTableBuilder.build()).build(); + + while (clusterState.getRoutingNodes().hasUnassignedShards() + || clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).isEmpty() == false) { + clusterState = allocationService.applyStartedShards(clusterState, + clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING)); + clusterState = allocationService.reroute(clusterState, "reroute"); + } + + return clusterState; + } + + public void testRebalancingAttemptedIfPermitted() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), + randomFrom(EnableAllocationDecider.Allocation.ALL, + EnableAllocationDecider.Allocation.NEW_PRIMARIES, + EnableAllocationDecider.Allocation.PRIMARIES).name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, greaterThan(0)); + } + + public void testRebalancingSkippedIfDisabled() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE.name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, equalTo(0)); + } + + public void testRebalancingSkippedIfDisabledIncludingOnSpecificIndices() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + final IndexMetaData indexMetaData = randomFrom(clusterState.metaData().indices().values().toArray(IndexMetaData.class)); + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) + .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder().put(indexMetaData.getSettings()) + .put(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()))).build()).build(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, equalTo(0)); + } + + public void testRebalancingAttemptedIfDisabledButOverridenOnSpecificIndices() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + final 
IndexMetaData indexMetaData = randomFrom(clusterState.metaData().indices().values().toArray(IndexMetaData.class)); + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) + .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder().put(indexMetaData.getSettings()) + .put(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), + randomFrom(EnableAllocationDecider.Allocation.ALL, + EnableAllocationDecider.Allocation.NEW_PRIMARIES, + EnableAllocationDecider.Allocation.PRIMARIES).name()))).build()).build(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, greaterThan(0)); + } + + public void testAllocationSkippedIfDisabled() { + final AllocateShortCircuitPlugin plugin = new AllocateShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE.name()), + plugin); + + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")) + .build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.canAllocateAttempts, equalTo(0)); + } + + private static AllocationService createAllocationService(Settings.Builder settings, ClusterPlugin plugin) { + final ClusterSettings emptyClusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + List deciders = new ArrayList<>(ClusterModule.createAllocationDeciders(settings.build(), emptyClusterSettings, + Collections.singletonList(plugin))); + return new MockAllocationService( + new AllocationDeciders(deciders), + new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); + } + + private static class RebalanceShortCircuitPlugin implements ClusterPlugin { + int rebalanceAttempts; + + @Override + public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { + return Collections.singletonList(new RebalanceShortCircuitAllocationDecider()); + } + + private class RebalanceShortCircuitAllocationDecider extends AllocationDecider { + + @Override + public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { + rebalanceAttempts++; + return super.canRebalance(shardRouting, allocation); + } + + @Override + public Decision canRebalance(RoutingAllocation allocation) { + rebalanceAttempts++; + return super.canRebalance(allocation); + } + } + } + + private static class AllocateShortCircuitPlugin implements ClusterPlugin { + int canAllocateAttempts; + + @Override + public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { + return Collections.singletonList(new AllocateShortCircuitAllocationDecider()); + } + + private class 
AllocateShortCircuitAllocationDecider extends AllocationDecider { + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(shardRouting, node, allocation); + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(shardRouting, allocation); + } + + @Override + public Decision canAllocate(IndexMetaData indexMetaData, RoutingNode node, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(indexMetaData, node, allocation); + } + + @Override + public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(node, allocation); + } + } + } +} From 21b99a3aebaa9cbed96f62f773ef71137e479a54 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 5 Apr 2019 14:10:22 +0300 Subject: [PATCH 32/45] Remove unneeded cluster config from test (#40856) This configuration doesn't influence the logger test. It should be removed to avoid confusion. --- qa/logging-config/build.gradle | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle index 0abdc124751..7b5682507cb 100644 --- a/qa/logging-config/build.gradle +++ b/qa/logging-config/build.gradle @@ -1,4 +1,4 @@ -/* + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright @@ -23,8 +23,6 @@ apply plugin: 'elasticsearch.rest-test' apply plugin: 'elasticsearch.standalone-test' integTestCluster { - autoSetInitialMasterNodes = false - autoSetHostsProvider = false /** * Provide a custom log4j configuration where layout is an old style pattern and confirm that Elasticsearch * can successfully startup. From 2569fb60de676511be7665f24102736221056409 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 8 Apr 2019 07:05:12 -0700 Subject: [PATCH 33/45] Avoid sharing source directories as it breaks IntelliJ (#40877) * Avoid sharing source directories as it breaks IntelliJ * Subprojects share main project output classes directory * Fix jar hell * Fix sql security with ssl integ tests * Relax dependency ordering rule so we don't explode on cycles --- build.gradle | 33 +------------------ qa/full-cluster-restart/build.gradle | 12 +++++++ x-pack/plugin/core/build.gradle | 7 ++-- x-pack/plugin/security/build.gradle | 10 +++--- x-pack/plugin/sql/qa/security/build.gradle | 12 ++++--- x-pack/qa/full-cluster-restart/build.gradle | 10 +----- x-pack/qa/security-tools-tests/build.gradle | 4 ++- .../third-party/active-directory/build.gradle | 5 ++- 8 files changed, 39 insertions(+), 54 deletions(-) diff --git a/build.gradle b/build.gradle index 3217ea93546..2e3fc3178bf 100644 --- a/build.gradle +++ b/build.gradle @@ -338,14 +338,6 @@ gradle.projectsEvaluated { integTest.mustRunAfter test } configurations.all { Configuration configuration -> - /* - * The featureAwarePlugin configuration has a dependency on x-pack:plugin:core and x-pack:plugin:core has a dependency on the - * featureAwarePlugin configuration. The below task ordering logic would force :x-pack:plugin:core:test - * :x-pack:test:feature-aware:test to depend on each other circularly. We break that cycle here. 
- */ - if (configuration.name == "featureAwarePlugin") { - return - } dependencies.all { Dependency dep -> Project upstreamProject = dependencyToProject(dep) if (upstreamProject != null) { @@ -357,7 +349,7 @@ gradle.projectsEvaluated { Task task = project.tasks.findByName(taskName) Task upstreamTask = upstreamProject.tasks.findByName(taskName) if (task != null && upstreamTask != null) { - task.mustRunAfter(upstreamTask) + task.shouldRunAfter(upstreamTask) } } } @@ -382,21 +374,6 @@ allprojects { // also ignore other possible build dirs excludeDirs += file('build') excludeDirs += file('build-eclipse') - - iml { - // fix so that Gradle idea plugin properly generates support for resource folders - // see also https://issues.gradle.org/browse/GRADLE-2975 - withXml { - it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/main/resources' }.each { - it.attributes().remove('isTestSource') - it.attributes().put('type', 'java-resource') - } - it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/test/resources' }.each { - it.attributes().remove('isTestSource') - it.attributes().put('type', 'java-test-resource') - } - } - } } } @@ -414,14 +391,6 @@ idea { vcs = 'Git' } } -// Make sure gradle idea was run before running anything in intellij (including import). -File ideaMarker = new File(projectDir, '.local-idea-is-configured') -tasks.idea.doLast { - ideaMarker.setText('', 'UTF-8') -} -if (System.getProperty('idea.active') != null && ideaMarker.exists() == false) { - throw new GradleException('You must run `./gradlew idea` from the root of elasticsearch before importing into IntelliJ') -} // eclipse configuration allprojects { diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index 60c552fc100..a856dd1f0ec 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -105,3 +105,15 @@ task bwcTestSnapshots { check.dependsOn(bwcTestSnapshots) +configurations { + testArtifacts.extendsFrom testRuntime +} + +task testJar(type: Jar) { + appendix 'test' + from sourceSets.test.output +} + +artifacts { + testArtifacts testJar +} \ No newline at end of file diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index 19c1764e9c2..8828a71b06b 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -48,6 +48,9 @@ dependencies { testCompile project(path: ':modules:reindex', configuration: 'runtime') testCompile project(path: ':modules:parent-join', configuration: 'runtime') testCompile project(path: ':modules:analysis-common', configuration: 'runtime') + testCompile(project(':x-pack:license-tools')) { + transitive = false + } testCompile ("org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}") } @@ -95,8 +98,8 @@ licenseHeaders { } // make LicenseSigner available for testing signed licenses -sourceSets.test.java { - srcDir '../../license-tools/src/main/java' +sourceSets.test.resources { + srcDir 'src/main/config' } unitTest { diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 7608543fb8a..fb089afb1ba 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -126,6 +126,11 @@ dependencies { compileJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" compileTestJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" +processTestResources { + from(project(xpackModule('core')).file('src/main/config')) + 
from(project(xpackModule('core')).file('src/test/resources')) +} + configurations { testArtifacts.extendsFrom testRuntime } @@ -138,10 +143,7 @@ artifacts { archives jar testArtifacts testJar } -sourceSets.test.resources { - srcDir '../core/src/test/resources' - srcDir '../core/src/main/config' -} + dependencyLicenses { mapping from: /java-support|opensaml-.*/, to: 'shibboleth' mapping from: /http.*/, to: 'httpclient' diff --git a/x-pack/plugin/sql/qa/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle index 45ab8c92f1e..79ebff00854 100644 --- a/x-pack/plugin/sql/qa/security/build.gradle +++ b/x-pack/plugin/sql/qa/security/build.gradle @@ -13,15 +13,17 @@ subprojects { // Use resources from the parent project in subprojects sourceSets { test { - java { - srcDirs = ["${mainProject.projectDir}/src/test/java"] - } - resources { - srcDirs = ["${mainProject.projectDir}/src/test/resources"] + mainProject.sourceSets.test.output.classesDirs.each { dir -> + output.addClassesDir { dir } } + runtimeClasspath += mainProject.sourceSets.test.output } } + processTestResources { + from mainProject.file('src/test/resources') + } + dependencies { testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 40dca76abc9..da06c6ac5ef 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -25,6 +25,7 @@ dependencies { // This is total #$%, but the solution is to get the SAML realm (which uses guava) out of security proper exclude group: "com.google.guava", module: "guava" } + testCompile project(path: ':qa:full-cluster-restart', configuration: 'testArtifacts') } Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> @@ -70,15 +71,6 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> return tmpFile.exists() } -String coreFullClusterRestartPath = project(':qa:full-cluster-restart').projectDir.toPath().resolve('src/test/java').toString() -sourceSets { - test { - java { - srcDirs += [coreFullClusterRestartPath] - } - } -} - licenseHeaders { approvedLicenses << 'Apache' } diff --git a/x-pack/qa/security-tools-tests/build.gradle b/x-pack/qa/security-tools-tests/build.gradle index 5df22c557db..135f82bb4a6 100644 --- a/x-pack/qa/security-tools-tests/build.gradle +++ b/x-pack/qa/security-tools-tests/build.gradle @@ -8,7 +8,9 @@ dependencies { } // add test resources from security, so certificate tool tests can use example certs -sourceSets.test.resources.srcDirs(project(xpackModule('security')).sourceSets.test.resources.srcDirs) +processTestResources { + from(project(xpackModule('security')).sourceSets.test.resources.srcDirs) +} // we have to repeate these patterns because the security test resources are effectively in the src of this project forbiddenPatterns { diff --git a/x-pack/qa/third-party/active-directory/build.gradle b/x-pack/qa/third-party/active-directory/build.gradle index b0a48a7b19f..e0c1076bdd7 100644 --- a/x-pack/qa/third-party/active-directory/build.gradle +++ b/x-pack/qa/third-party/active-directory/build.gradle @@ -9,7 +9,10 @@ dependencies { testFixtures.useFixture ":x-pack:test:smb-fixture" // add test resources from security, so tests can use example certs -sourceSets.test.resources.srcDirs(project(xpackModule('security')).sourceSets.test.resources.srcDirs) +processTestResources { + from(project(xpackModule('security')).sourceSets.test.resources.srcDirs) +} + compileTestJava.options.compilerArgs << 
"-Xlint:-rawtypes,-unchecked" // we have to repeat these patterns because the security test resources are effectively in the src of this project From 8eef92fafd8944c95f526ce63d4ebfbc4992e6f4 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 8 Apr 2019 15:58:56 +0100 Subject: [PATCH 34/45] Revert "Short-circuit rebalancing when disabled (#40942)" This reverts commit f78e6ef73b4ac47f223ee471336a011d3deced0a. --- .../decider/EnableAllocationDecider.java | 30 +-- .../EnableAllocationShortCircuitTests.java | 233 ------------------ 2 files changed, 3 insertions(+), 260 deletions(-) delete mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index c73a630bb66..8a72fe8cb49 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -86,21 +86,16 @@ public class EnableAllocationDecider extends AllocationDecider { clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); } - private void setEnableRebalance(Rebalance enableRebalance) { + public void setEnableRebalance(Rebalance enableRebalance) { this.enableRebalance = enableRebalance; } - private void setEnableAllocation(Allocation enableAllocation) { + public void setEnableAllocation(Allocation enableAllocation) { this.enableAllocation = enableAllocation; } @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - return canAllocate(shardRouting, allocation); - } - - @Override - public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { if (allocation.ignoreDisable()) { return allocation.decision(Decision.YES, NAME, "explicitly ignoring any disabling of allocation due to manual allocation commands via the reroute API"); @@ -141,29 +136,10 @@ public class EnableAllocationDecider extends AllocationDecider { } } - @Override - public Decision canRebalance(RoutingAllocation allocation) { - if (allocation.ignoreDisable()) { - return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of rebalancing"); - } - - if (enableRebalance == Rebalance.NONE) { - for (IndexMetaData indexMetaData : allocation.metaData()) { - if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexMetaData.getSettings()) - && INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexMetaData.getSettings()) != Rebalance.NONE) { - return allocation.decision(Decision.YES, NAME, "rebalancing is permitted on one or more indices"); - } - } - return allocation.decision(Decision.NO, NAME, "no rebalancing is allowed due to %s", setting(enableRebalance, false)); - } - - return allocation.decision(Decision.YES, NAME, "rebalancing is not globally disabled"); - } - @Override public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { if (allocation.ignoreDisable()) { - return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of rebalancing"); + return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of relocation"); } Settings indexSettings = 
allocation.metaData().getIndexSafe(shardRouting.index()).getSettings(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java deleted file mode 100644 index ebe6f8f0220..00000000000 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.cluster.routing.allocation.decider; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterModule; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.EmptyClusterInfoService; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.ClusterPlugin; -import org.elasticsearch.test.gateway.TestGatewayAllocator; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; - -public class EnableAllocationShortCircuitTests extends ESAllocationTestCase { - - private static ClusterState createClusterStateWithAllShardsAssigned() { - AllocationService allocationService = createAllocationService(Settings.EMPTY); - - final int numberOfNodes = randomIntBetween(1, 5); - final DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(); - for (int i = 0; i < numberOfNodes; 
i++) { - discoveryNodesBuilder.add(newNode("node" + i)); - } - - final MetaData.Builder metadataBuilder = MetaData.builder(); - final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - for (int i = randomIntBetween(1, 10); i >= 0; i--) { - final IndexMetaData indexMetaData = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)) - .numberOfShards(1).numberOfReplicas(randomIntBetween(0, numberOfNodes - 1)).build(); - metadataBuilder.put(indexMetaData, true); - routingTableBuilder.addAsNew(indexMetaData); - } - - ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(Settings.EMPTY)) - .nodes(discoveryNodesBuilder).metaData(metadataBuilder).routingTable(routingTableBuilder.build()).build(); - - while (clusterState.getRoutingNodes().hasUnassignedShards() - || clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).isEmpty() == false) { - clusterState = allocationService.applyStartedShards(clusterState, - clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING)); - clusterState = allocationService.reroute(clusterState, "reroute"); - } - - return clusterState; - } - - public void testRebalancingAttemptedIfPermitted() { - ClusterState clusterState = createClusterStateWithAllShardsAssigned(); - - final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); - AllocationService allocationService = createAllocationService(Settings.builder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), - randomFrom(EnableAllocationDecider.Allocation.ALL, - EnableAllocationDecider.Allocation.NEW_PRIMARIES, - EnableAllocationDecider.Allocation.PRIMARIES).name()), - plugin); - allocationService.reroute(clusterState, "reroute").routingTable(); - assertThat(plugin.rebalanceAttempts, greaterThan(0)); - } - - public void testRebalancingSkippedIfDisabled() { - ClusterState clusterState = createClusterStateWithAllShardsAssigned(); - - final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); - AllocationService allocationService = createAllocationService(Settings.builder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE.name()), - plugin); - allocationService.reroute(clusterState, "reroute").routingTable(); - assertThat(plugin.rebalanceAttempts, equalTo(0)); - } - - public void testRebalancingSkippedIfDisabledIncludingOnSpecificIndices() { - ClusterState clusterState = createClusterStateWithAllShardsAssigned(); - final IndexMetaData indexMetaData = randomFrom(clusterState.metaData().indices().values().toArray(IndexMetaData.class)); - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) - .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder().put(indexMetaData.getSettings()) - .put(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()))).build()).build(); - - final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); - AllocationService allocationService = createAllocationService(Settings.builder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()), - plugin); - allocationService.reroute(clusterState, "reroute").routingTable(); - assertThat(plugin.rebalanceAttempts, equalTo(0)); - } - - public void testRebalancingAttemptedIfDisabledButOverridenOnSpecificIndices() { - ClusterState clusterState = createClusterStateWithAllShardsAssigned(); - final 
IndexMetaData indexMetaData = randomFrom(clusterState.metaData().indices().values().toArray(IndexMetaData.class)); - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) - .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder().put(indexMetaData.getSettings()) - .put(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), - randomFrom(EnableAllocationDecider.Allocation.ALL, - EnableAllocationDecider.Allocation.NEW_PRIMARIES, - EnableAllocationDecider.Allocation.PRIMARIES).name()))).build()).build(); - - final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); - AllocationService allocationService = createAllocationService(Settings.builder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()), - plugin); - allocationService.reroute(clusterState, "reroute").routingTable(); - assertThat(plugin.rebalanceAttempts, greaterThan(0)); - } - - public void testAllocationSkippedIfDisabled() { - final AllocateShortCircuitPlugin plugin = new AllocateShortCircuitPlugin(); - AllocationService allocationService = createAllocationService(Settings.builder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE.name()), - plugin); - - MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) - .build(); - - RoutingTable routingTable = RoutingTable.builder() - .addAsNew(metaData.index("test")) - .build(); - - ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .metaData(metaData).routingTable(routingTable).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); - - allocationService.reroute(clusterState, "reroute").routingTable(); - assertThat(plugin.canAllocateAttempts, equalTo(0)); - } - - private static AllocationService createAllocationService(Settings.Builder settings, ClusterPlugin plugin) { - final ClusterSettings emptyClusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - List deciders = new ArrayList<>(ClusterModule.createAllocationDeciders(settings.build(), emptyClusterSettings, - Collections.singletonList(plugin))); - return new MockAllocationService( - new AllocationDeciders(deciders), - new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); - } - - private static class RebalanceShortCircuitPlugin implements ClusterPlugin { - int rebalanceAttempts; - - @Override - public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { - return Collections.singletonList(new RebalanceShortCircuitAllocationDecider()); - } - - private class RebalanceShortCircuitAllocationDecider extends AllocationDecider { - - @Override - public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { - rebalanceAttempts++; - return super.canRebalance(shardRouting, allocation); - } - - @Override - public Decision canRebalance(RoutingAllocation allocation) { - rebalanceAttempts++; - return super.canRebalance(allocation); - } - } - } - - private static class AllocateShortCircuitPlugin implements ClusterPlugin { - int canAllocateAttempts; - - @Override - public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { - return Collections.singletonList(new AllocateShortCircuitAllocationDecider()); - } - - private class 
AllocateShortCircuitAllocationDecider extends AllocationDecider { - - @Override - public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - canAllocateAttempts++; - return super.canAllocate(shardRouting, node, allocation); - } - - @Override - public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { - canAllocateAttempts++; - return super.canAllocate(shardRouting, allocation); - } - - @Override - public Decision canAllocate(IndexMetaData indexMetaData, RoutingNode node, RoutingAllocation allocation) { - canAllocateAttempts++; - return super.canAllocate(indexMetaData, node, allocation); - } - - @Override - public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) { - canAllocateAttempts++; - return super.canAllocate(node, allocation); - } - } - } -} From 2206491277c44fc7f3b0ba42779c999627dfdc19 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Mon, 8 Apr 2019 16:43:36 +0200 Subject: [PATCH 35/45] SQL: Refactor args verification of In & conditionals (#40916) Move verification of arguments for Conditional functions and IN from `Verifier` to the `resolveType()` method of the functions. (cherry picked from commit 241644aac57baee1eb128b993ee410c7d08172a5) --- .../xpack/sql/analysis/analyzer/Verifier.java | 53 ----- .../expression/function/FunctionRegistry.java | 48 ++--- .../ArbitraryConditionalFunction.java | 9 - .../conditional/ConditionalFunction.java | 30 +++ .../predicate/conditional/NullIf.java | 6 - .../predicate/operator/comparison/In.java | 18 +- .../xpack/sql/type/DataTypes.java | 11 + .../xpack/sql/util/StringUtils.java | 24 ++- .../analyzer/VerifierErrorMessagesTests.java | 67 +----- .../function/FunctionRegistryTests.java | 6 +- .../xpack/sql/optimizer/OptimizerTests.java | 193 +++++++++--------- 11 files changed, 212 insertions(+), 253 deletions(-) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index bade2d44c8a..db84a444f57 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -26,8 +26,6 @@ import org.elasticsearch.xpack.sql.expression.function.aggregate.TopHits; import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; -import org.elasticsearch.xpack.sql.expression.predicate.conditional.ConditionalFunction; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.sql.plan.logical.Aggregate; import org.elasticsearch.xpack.sql.plan.logical.Distinct; import org.elasticsearch.xpack.sql.plan.logical.Filter; @@ -228,9 +226,6 @@ public final class Verifier { Set localFailures = new LinkedHashSet<>(); - validateInExpression(p, localFailures); - validateConditional(p, localFailures); - checkGroupingFunctionInGroupBy(p, localFailures); checkFilterOnAggs(p, localFailures); checkFilterOnGrouping(p, localFailures); @@ -724,52 +719,4 @@ public final class Verifier { fail(nested.get(0), "HAVING isn't (yet) compatible with nested fields " + new AttributeSet(nested).names())); } } - - private static void validateInExpression(LogicalPlan p, Set 
localFailures) { - p.forEachExpressions(e -> - e.forEachUp((In in) -> { - DataType dt = in.value().dataType(); - for (Expression value : in.list()) { - if (areTypesCompatible(dt, value.dataType()) == false) { - localFailures.add(fail(value, "expected data type [{}], value provided is of type [{}]", - dt.typeName, value.dataType().typeName)); - return; - } - } - }, - In.class)); - } - - private static void validateConditional(LogicalPlan p, Set localFailures) { - p.forEachExpressions(e -> - e.forEachUp((ConditionalFunction cf) -> { - DataType dt = DataType.NULL; - - for (Expression child : cf.children()) { - if (dt == DataType.NULL) { - if (Expressions.isNull(child) == false) { - dt = child.dataType(); - } - } else { - if (areTypesCompatible(dt, child.dataType()) == false) { - localFailures.add(fail(child, "expected data type [{}], value provided is of type [{}]", - dt.typeName, child.dataType().typeName)); - return; - } - } - } - }, - ConditionalFunction.class)); - } - - private static boolean areTypesCompatible(DataType left, DataType right) { - if (left == right) { - return true; - } else { - return - (left == DataType.NULL || right == DataType.NULL) || - (left.isString() && right.isString()) || - (left.isNumeric() && right.isNumeric()); - } - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java index 1e22be34661..a29b19e4128 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java @@ -262,7 +262,7 @@ public class FunctionRegistry { for (String alias : f.aliases()) { Object old = batchMap.put(alias, f); if (old != null || defs.containsKey(alias)) { - throw new IllegalArgumentException("alias [" + alias + "] is used by " + throw new SqlIllegalArgumentException("alias [" + alias + "] is used by " + "[" + (old != null ? old : defs.get(alias).name()) + "] and [" + f.name() + "]"); } aliases.put(alias, f.name()); @@ -321,10 +321,10 @@ public class FunctionRegistry { java.util.function.Function ctorRef, String... names) { FunctionBuilder builder = (source, children, distinct, cfg) -> { if (false == children.isEmpty()) { - throw new IllegalArgumentException("expects no arguments"); + throw new SqlIllegalArgumentException("expects no arguments"); } if (distinct) { - throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + throw new SqlIllegalArgumentException("does not support DISTINCT yet it was specified"); } return ctorRef.apply(source); }; @@ -341,10 +341,10 @@ public class FunctionRegistry { ConfigurationAwareFunctionBuilder ctorRef, String... names) { FunctionBuilder builder = (source, children, distinct, cfg) -> { if (false == children.isEmpty()) { - throw new IllegalArgumentException("expects no arguments"); + throw new SqlIllegalArgumentException("expects no arguments"); } if (distinct) { - throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + throw new SqlIllegalArgumentException("does not support DISTINCT yet it was specified"); } return ctorRef.build(source, cfg); }; @@ -365,10 +365,10 @@ public class FunctionRegistry { UnaryConfigurationAwareFunctionBuilder ctorRef, String... 
names) { FunctionBuilder builder = (source, children, distinct, cfg) -> { if (children.size() > 1) { - throw new IllegalArgumentException("expects exactly one argument"); + throw new SqlIllegalArgumentException("expects exactly one argument"); } if (distinct) { - throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + throw new SqlIllegalArgumentException("does not support DISTINCT yet it was specified"); } Expression ex = children.size() == 1 ? children.get(0) : null; return ctorRef.build(source, ex, cfg); @@ -390,10 +390,10 @@ public class FunctionRegistry { BiFunction ctorRef, String... names) { FunctionBuilder builder = (source, children, distinct, cfg) -> { if (children.size() != 1) { - throw new IllegalArgumentException("expects exactly one argument"); + throw new SqlIllegalArgumentException("expects exactly one argument"); } if (distinct) { - throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + throw new SqlIllegalArgumentException("does not support DISTINCT yet it was specified"); } return ctorRef.apply(source, children.get(0)); }; @@ -409,7 +409,7 @@ public class FunctionRegistry { MultiFunctionBuilder ctorRef, String... names) { FunctionBuilder builder = (source, children, distinct, cfg) -> { if (distinct) { - throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + throw new SqlIllegalArgumentException("does not support DISTINCT yet it was specified"); } return ctorRef.build(source, children); }; @@ -429,7 +429,7 @@ public class FunctionRegistry { DistinctAwareUnaryFunctionBuilder ctorRef, String... names) { FunctionBuilder builder = (source, children, distinct, cfg) -> { if (children.size() != 1) { - throw new IllegalArgumentException("expects exactly one argument"); + throw new SqlIllegalArgumentException("expects exactly one argument"); } return ctorRef.build(source, children.get(0), distinct); }; @@ -449,10 +449,10 @@ public class FunctionRegistry { DatetimeUnaryFunctionBuilder ctorRef, String... names) { FunctionBuilder builder = (source, children, distinct, cfg) -> { if (children.size() != 1) { - throw new IllegalArgumentException("expects exactly one argument"); + throw new SqlIllegalArgumentException("expects exactly one argument"); } if (distinct) { - throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + throw new SqlIllegalArgumentException("does not support DISTINCT yet it was specified"); } return ctorRef.build(source, children.get(0), cfg.zoneId()); }; @@ -471,10 +471,10 @@ public class FunctionRegistry { static FunctionDefinition def(Class function, DatetimeBinaryFunctionBuilder ctorRef, String... 
names) { FunctionBuilder builder = (source, children, distinct, cfg) -> { if (children.size() != 2) { - throw new IllegalArgumentException("expects exactly two arguments"); + throw new SqlIllegalArgumentException("expects exactly two arguments"); } if (distinct) { - throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + throw new SqlIllegalArgumentException("does not support DISTINCT yet it was specified"); } return ctorRef.build(source, children.get(0), children.get(1), cfg.zoneId()); }; @@ -496,13 +496,13 @@ public class FunctionRegistry { boolean isBinaryOptionalParamFunction = function.isAssignableFrom(Round.class) || function.isAssignableFrom(Truncate.class) || TopHits.class.isAssignableFrom(function); if (isBinaryOptionalParamFunction && (children.size() > 2 || children.size() < 1)) { - throw new IllegalArgumentException("expects one or two arguments"); + throw new SqlIllegalArgumentException("expects one or two arguments"); } else if (!isBinaryOptionalParamFunction && children.size() != 2) { - throw new IllegalArgumentException("expects exactly two arguments"); + throw new SqlIllegalArgumentException("expects exactly two arguments"); } if (distinct) { - throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + throw new SqlIllegalArgumentException("does not support DISTINCT yet it was specified"); } return ctorRef.build(source, children.get(0), children.size() == 2 ? children.get(1) : null); }; @@ -527,7 +527,7 @@ public class FunctionRegistry { FunctionDefinition.Builder realBuilder = (uf, distinct, cfg) -> { try { return builder.build(uf.source(), uf.children(), distinct, cfg); - } catch (IllegalArgumentException e) { + } catch (SqlIllegalArgumentException e) { throw new ParsingException(uf.source(), "error building [" + primaryName + "]: " + e.getMessage(), e); } }; @@ -544,12 +544,12 @@ public class FunctionRegistry { FunctionBuilder builder = (source, children, distinct, cfg) -> { boolean isLocateFunction = function.isAssignableFrom(Locate.class); if (isLocateFunction && (children.size() > 3 || children.size() < 2)) { - throw new IllegalArgumentException("expects two or three arguments"); + throw new SqlIllegalArgumentException("expects two or three arguments"); } else if (!isLocateFunction && children.size() != 3) { - throw new IllegalArgumentException("expects exactly three arguments"); + throw new SqlIllegalArgumentException("expects exactly three arguments"); } if (distinct) { - throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + throw new SqlIllegalArgumentException("does not support DISTINCT yet it was specified"); } return ctorRef.build(source, children.get(0), children.get(1), children.size() == 3 ? children.get(2) : null); }; @@ -565,10 +565,10 @@ public class FunctionRegistry { FourParametersFunctionBuilder ctorRef, String... 
names) { FunctionBuilder builder = (source, children, distinct, cfg) -> { if (children.size() != 4) { - throw new IllegalArgumentException("expects exactly four arguments"); + throw new SqlIllegalArgumentException("expects exactly four arguments"); } if (distinct) { - throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + throw new SqlIllegalArgumentException("does not support DISTINCT yet it was specified"); } return ctorRef.build(source, children.get(0), children.get(1), children.get(2), children.get(3)); }; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ArbitraryConditionalFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ArbitraryConditionalFunction.java index ecc5835d1aa..b2bea979acd 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ArbitraryConditionalFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ArbitraryConditionalFunction.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.expression.predicate.conditional.ConditionalProcessor.ConditionalOperation; import org.elasticsearch.xpack.sql.tree.Source; -import org.elasticsearch.xpack.sql.type.DataTypeConversion; import java.util.ArrayList; import java.util.List; @@ -33,14 +32,6 @@ public abstract class ArbitraryConditionalFunction extends ConditionalFunction { this.operation = operation; } - @Override - protected TypeResolution resolveType() { - for (Expression e : children()) { - dataType = DataTypeConversion.commonType(dataType, e.dataType()); - } - return TypeResolution.TYPE_RESOLVED; - } - @Override protected Pipe makePipe() { return new ConditionalPipe(source(), this, Expressions.pipe(children()), operation); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java index 13b765e941c..3de85185e8a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java @@ -12,9 +12,14 @@ import org.elasticsearch.xpack.sql.expression.Nullability; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; import java.util.List; +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.sql.type.DataTypes.areTypesCompatible; +import static org.elasticsearch.xpack.sql.util.StringUtils.ordinal; + /** * Base class for conditional predicates. 
*/ @@ -36,6 +41,31 @@ public abstract class ConditionalFunction extends ScalarFunction { return Expressions.foldable(children()); } + @Override + protected TypeResolution resolveType() { + DataType dt = DataType.NULL; + + for (int i = 0; i < children().size(); i++) { + Expression child = children().get(i); + if (dt == DataType.NULL) { + if (Expressions.isNull(child) == false) { + dt = child.dataType(); + } + } else { + if (areTypesCompatible(dt, child.dataType()) == false) { + return new TypeResolution(format(null, "{} argument of [{}] must be [{}], found value [{}] type [{}]", + ordinal(i + 1), + sourceText(), + dt.typeName, + Expressions.name(child), + child.dataType().typeName)); + } + } + dataType = DataTypeConversion.commonType(dataType, child.dataType()); + } + return TypeResolution.TYPE_RESOLVED; + } + @Override public Nullability nullable() { return Nullability.UNKNOWN; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIf.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIf.java index dac5add1792..50692edb40e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIf.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/NullIf.java @@ -39,12 +39,6 @@ public class NullIf extends ConditionalFunction { return new NullIf(source(), newChildren.get(0), newChildren.get(1)); } - @Override - protected TypeResolution resolveType() { - dataType = children().get(0).dataType(); - return TypeResolution.TYPE_RESOLVED; - } - @Override public Object fold() { return NullIfProcessor.apply(children().get(0).fold(), children().get(1).fold()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java index e687c9ac1ba..342407c21b3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java @@ -26,6 +26,8 @@ import java.util.stream.Collectors; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; +import static org.elasticsearch.xpack.sql.type.DataTypes.areTypesCompatible; +import static org.elasticsearch.xpack.sql.util.StringUtils.ordinal; public class In extends ScalarFunction { @@ -109,7 +111,7 @@ public class In extends ScalarFunction { @Override protected TypeResolution resolveType() { TypeResolution resolution = TypeResolutions.isExact(value, functionName(), Expressions.ParamOrdinal.DEFAULT); - if (resolution != TypeResolution.TYPE_RESOLVED) { + if (resolution.unresolved()) { return resolution; } @@ -120,6 +122,20 @@ public class In extends ScalarFunction { name())); } } + + DataType dt = value.dataType(); + for (int i = 0; i < list.size(); i++) { + Expression listValue = list.get(i); + if (areTypesCompatible(dt, listValue.dataType()) == false) { + return new TypeResolution(format(null, "{} argument of [{}] must be [{}], found value [{}] type [{}]", + ordinal(i + 1), + sourceText(), + dt.typeName, + Expressions.name(listValue), + listValue.dataType().typeName)); + } + } + return super.resolveType(); } diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java index c74f6397452..dcd6a1b35a1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -230,4 +230,15 @@ public final class DataTypes { } return t.displaySize; } + + public static boolean areTypesCompatible(DataType left, DataType right) { + if (left == right) { + return true; + } else { + return + (left == DataType.NULL || right == DataType.NULL) || + (left.isString() && right.isString()) || + (left.isNumeric() && right.isNumeric()); + } + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java index 10066e77649..d2e8d3badf6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java @@ -23,12 +23,16 @@ import java.util.Locale; import static java.util.stream.Collectors.toList; -public abstract class StringUtils { +public final class StringUtils { + + private StringUtils() {} public static final String EMPTY = ""; public static final String NEW_LINE = "\n"; public static final String SQL_WILDCARD = "%"; + private static final String[] INTEGER_ORDINALS = new String[] { "th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th" }; + //CamelCase to camel_case public static String camelCaseToUnderscore(String string) { if (!Strings.hasText(string)) { @@ -86,10 +90,6 @@ public abstract class StringUtils { return sb.toString(); } - public static String nullAsEmpty(String string) { - return string == null ? EMPTY : string; - } - // % -> .* // _ -> . 
// escape character - can be 0 (in which case every regex gets escaped) or @@ -297,4 +297,16 @@ public abstract class StringUtils { throw new SqlIllegalArgumentException("Cannot parse number [{}]", string); } } -} \ No newline at end of file + + public static String ordinal(int i) { + switch (i % 100) { + case 11: + case 12: + case 13: + return i + "th"; + default: + return i + INTEGER_ORDINALS[i % 10]; + + } + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index 3b1e8da318f..c2310aa331e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -420,46 +420,16 @@ public class VerifierErrorMessagesTests extends ESTestCase { error("SELECT int FROM test GROUP BY int HAVING 2 < ABS(int)")); } - public void testInWithDifferentDataTypes_SelectClause() { - assertEquals("1:17: expected data type [integer], value provided is of type [keyword]", + public void testInWithDifferentDataTypes() { + assertEquals("1:8: 2nd argument of [1 IN (2, '3', 4)] must be [integer], found value ['3'] type [keyword]", error("SELECT 1 IN (2, '3', 4)")); } - public void testInNestedWithDifferentDataTypes_SelectClause() { - assertEquals("1:27: expected data type [integer], value provided is of type [keyword]", - error("SELECT 1 = 1 OR 1 IN (2, '3', 4)")); - } - - public void testInWithDifferentDataTypesFromLeftValue_SelectClause() { - assertEquals("1:14: expected data type [integer], value provided is of type [keyword]", + public void testInWithDifferentDataTypesFromLeftValue() { + assertEquals("1:8: 1st argument of [1 IN ('foo', 'bar')] must be [integer], found value ['foo'] type [keyword]", error("SELECT 1 IN ('foo', 'bar')")); } - public void testInNestedWithDifferentDataTypesFromLeftValue_SelectClause() { - assertEquals("1:29: expected data type [keyword], value provided is of type [integer]", - error("SELECT 1 = 1 OR 'foo' IN (2, 3)")); - } - - public void testInWithDifferentDataTypes_WhereClause() { - assertEquals("1:52: expected data type [keyword], value provided is of type [integer]", - error("SELECT * FROM test WHERE keyword IN ('foo', 'bar', 4)")); - } - - public void testInNestedWithDifferentDataTypes_WhereClause() { - assertEquals("1:63: expected data type [keyword], value provided is of type [integer]", - error("SELECT * FROM test WHERE int = 1 OR keyword IN ('foo', 'bar', 2)")); - } - - public void testInWithDifferentDataTypesFromLeftValue_WhereClause() { - assertEquals("1:38: expected data type [keyword], value provided is of type [integer]", - error("SELECT * FROM test WHERE keyword IN (1, 2)")); - } - - public void testInNestedWithDifferentDataTypesFromLeftValue_WhereClause() { - assertEquals("1:49: expected data type [keyword], value provided is of type [integer]", - error("SELECT * FROM test WHERE int = 1 OR keyword IN (1, 2)")); - } - public void testInWithFieldInListOfValues() { assertEquals("1:26: Comparisons against variables are not (currently) supported; offender [int] in [int IN (1, int)]", error("SELECT * FROM test WHERE int IN (1, int)")); @@ -615,32 +585,17 @@ public class VerifierErrorMessagesTests extends ESTestCase { incompatibleError("SELECT languages FROM \"*\" ORDER BY SIGN(ABS(emp_no))")); } - public void 
testConditionalWithDifferentDataTypes_SelectClause() { + public void testConditionalWithDifferentDataTypes() { @SuppressWarnings("unchecked") String function = randomFrom(IfNull.class, NullIf.class).getSimpleName(); - assertEquals("1:" + (22 + function.length()) + - ": expected data type [integer], value provided is of type [keyword]", - error("SELECT 1 = 1 OR " + function + "(3, '4') > 1")); + assertEquals("1:17: 2nd argument of [" + function + "(3, '4')] must be [integer], found value ['4'] type [keyword]", + error("SELECT 1 = 1 OR " + function + "(3, '4') > 1")); @SuppressWarnings("unchecked") - String arbirtraryArgsfunction = randomFrom(Coalesce.class, Greatest.class, Least.class).getSimpleName(); - assertEquals("1:" + (34 + arbirtraryArgsfunction.length()) + - ": expected data type [integer], value provided is of type [keyword]", - error("SELECT 1 = 1 OR " + arbirtraryArgsfunction + "(null, null, 3, '4') > 1")); - } - - public void testConditionalWithDifferentDataTypes_WhereClause() { - @SuppressWarnings("unchecked") - String function = randomFrom(IfNull.class, NullIf.class).getSimpleName(); - assertEquals("1:" + (34 + function.length()) + - ": expected data type [keyword], value provided is of type [integer]", - error("SELECT * FROM test WHERE " + function + "('foo', 4) > 1")); - - @SuppressWarnings("unchecked") - String arbirtraryArgsfunction = randomFrom(Coalesce.class, Greatest.class, Least.class).getSimpleName(); - assertEquals("1:" + (46 + arbirtraryArgsfunction.length()) + - ": expected data type [keyword], value provided is of type [integer]", - error("SELECT * FROM test WHERE " + arbirtraryArgsfunction + "(null, null, 'foo', 4) > 1")); + String arbirtraryArgsFunction = randomFrom(Coalesce.class, Greatest.class, Least.class).getSimpleName(); + assertEquals("1:17: 3rd argument of [" + arbirtraryArgsFunction + "(null, 3, '4')] must be [integer], " + + "found value ['4'] type [keyword]", + error("SELECT 1 = 1 OR " + arbirtraryArgsFunction + "(null, 3, '4') > 1")); } public void testAggsInWhere() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java index a810dac501e..56767d7e319 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java @@ -165,13 +165,13 @@ public class FunctionRegistryTests extends ESTestCase { public void testAliasNameIsTheSameAsAFunctionName() { FunctionRegistry r = new FunctionRegistry(def(DummyFunction.class, DummyFunction::new, "DUMMY_FUNCTION", "ALIAS")); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> + SqlIllegalArgumentException iae = expectThrows(SqlIllegalArgumentException.class, () -> r.addToMap(def(DummyFunction2.class, DummyFunction2::new, "DUMMY_FUNCTION2", "DUMMY_FUNCTION"))); assertEquals("alias [DUMMY_FUNCTION] is used by [DUMMY_FUNCTION] and [DUMMY_FUNCTION2]", iae.getMessage()); } public void testDuplicateAliasInTwoDifferentFunctionsFromTheSameBatch() { - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> + SqlIllegalArgumentException iae = expectThrows(SqlIllegalArgumentException.class, () -> new FunctionRegistry(def(DummyFunction.class, DummyFunction::new, "DUMMY_FUNCTION", "ALIAS"), def(DummyFunction2.class, DummyFunction2::new, 
"DUMMY_FUNCTION2", "ALIAS"))); assertEquals("alias [ALIAS] is used by [DUMMY_FUNCTION(ALIAS)] and [DUMMY_FUNCTION2]", iae.getMessage()); @@ -179,7 +179,7 @@ public class FunctionRegistryTests extends ESTestCase { public void testDuplicateAliasInTwoDifferentFunctionsFromTwoDifferentBatches() { FunctionRegistry r = new FunctionRegistry(def(DummyFunction.class, DummyFunction::new, "DUMMY_FUNCTION", "ALIAS")); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> + SqlIllegalArgumentException iae = expectThrows(SqlIllegalArgumentException.class, () -> r.addToMap(def(DummyFunction2.class, DummyFunction2::new, "DUMMY_FUNCTION2", "ALIAS"))); assertEquals("alias [ALIAS] is used by [DUMMY_FUNCTION] and [DUMMY_FUNCTION2]", iae.getMessage()); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index 8e4c9c7dd59..a23d88b5956 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -108,7 +108,10 @@ import static java.util.Arrays.asList; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.expression.Literal.FALSE; import static org.elasticsearch.xpack.sql.expression.Literal.NULL; +import static org.elasticsearch.xpack.sql.expression.Literal.TRUE; +import static org.elasticsearch.xpack.sql.expression.Literal.of; import static org.elasticsearch.xpack.sql.tree.Source.EMPTY; import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; import static org.hamcrest.Matchers.contains; @@ -174,7 +177,7 @@ public class OptimizerTests extends ESTestCase { } private static Literal L(Object value) { - return Literal.of(EMPTY, value); + return of(EMPTY, value); } private static FieldAttribute getFieldAttribute() { @@ -190,8 +193,8 @@ public class OptimizerTests extends ESTestCase { } public void testDuplicateFunctions() { - AggregateFunction f1 = new Count(EMPTY, Literal.TRUE, false); - AggregateFunction f2 = new Count(EMPTY, Literal.TRUE, false); + AggregateFunction f1 = new Count(EMPTY, TRUE, false); + AggregateFunction f2 = new Count(EMPTY, TRUE, false); assertTrue(f1.functionEquals(f2)); @@ -284,34 +287,34 @@ public class OptimizerTests extends ESTestCase { } public void testConstantFoldingBinaryComparison() { - assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThan(EMPTY, TWO, THREE)).canonical()); - assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThanOrEqual(EMPTY, TWO, THREE)).canonical()); - assertEquals(Literal.FALSE, new ConstantFolding().rule(new Equals(EMPTY, TWO, THREE)).canonical()); - assertEquals(Literal.FALSE, new ConstantFolding().rule(new NullEquals(EMPTY, TWO, THREE)).canonical()); - assertEquals(Literal.FALSE, new ConstantFolding().rule(new NullEquals(EMPTY, TWO, NULL)).canonical()); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new NotEquals(EMPTY, TWO, THREE)).canonical()); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThanOrEqual(EMPTY, TWO, THREE)).canonical()); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThan(EMPTY, TWO, THREE)).canonical()); + assertEquals(FALSE, new ConstantFolding().rule(new GreaterThan(EMPTY, TWO, THREE)).canonical()); + assertEquals(FALSE, 
new ConstantFolding().rule(new GreaterThanOrEqual(EMPTY, TWO, THREE)).canonical()); + assertEquals(FALSE, new ConstantFolding().rule(new Equals(EMPTY, TWO, THREE)).canonical()); + assertEquals(FALSE, new ConstantFolding().rule(new NullEquals(EMPTY, TWO, THREE)).canonical()); + assertEquals(FALSE, new ConstantFolding().rule(new NullEquals(EMPTY, TWO, NULL)).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(new NotEquals(EMPTY, TWO, THREE)).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(new LessThanOrEqual(EMPTY, TWO, THREE)).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(new LessThan(EMPTY, TWO, THREE)).canonical()); } public void testConstantFoldingBinaryLogic() { - assertEquals(Literal.FALSE, - new ConstantFolding().rule(new And(EMPTY, new GreaterThan(EMPTY, TWO, THREE), Literal.TRUE)).canonical()); - assertEquals(Literal.TRUE, - new ConstantFolding().rule(new Or(EMPTY, new GreaterThanOrEqual(EMPTY, TWO, THREE), Literal.TRUE)).canonical()); + assertEquals(FALSE, + new ConstantFolding().rule(new And(EMPTY, new GreaterThan(EMPTY, TWO, THREE), TRUE)).canonical()); + assertEquals(TRUE, + new ConstantFolding().rule(new Or(EMPTY, new GreaterThanOrEqual(EMPTY, TWO, THREE), TRUE)).canonical()); } public void testConstantFoldingBinaryLogic_WithNullHandling() { - assertEquals(NULL, new ConstantFolding().rule(new And(EMPTY, NULL, Literal.TRUE)).canonical()); - assertEquals(NULL, new ConstantFolding().rule(new And(EMPTY, Literal.TRUE, NULL)).canonical()); - assertEquals(Literal.FALSE, new ConstantFolding().rule(new And(EMPTY, NULL, Literal.FALSE)).canonical()); - assertEquals(Literal.FALSE, new ConstantFolding().rule(new And(EMPTY, Literal.FALSE, NULL)).canonical()); + assertEquals(NULL, new ConstantFolding().rule(new And(EMPTY, NULL, TRUE)).canonical()); + assertEquals(NULL, new ConstantFolding().rule(new And(EMPTY, TRUE, NULL)).canonical()); + assertEquals(FALSE, new ConstantFolding().rule(new And(EMPTY, NULL, FALSE)).canonical()); + assertEquals(FALSE, new ConstantFolding().rule(new And(EMPTY, FALSE, NULL)).canonical()); assertEquals(NULL, new ConstantFolding().rule(new And(EMPTY, NULL, NULL)).canonical()); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new Or(EMPTY, NULL, Literal.TRUE)).canonical()); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new Or(EMPTY, Literal.TRUE, NULL)).canonical()); - assertEquals(NULL, new ConstantFolding().rule(new Or(EMPTY, NULL, Literal.FALSE)).canonical()); - assertEquals(NULL, new ConstantFolding().rule(new Or(EMPTY, Literal.FALSE, NULL)).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(new Or(EMPTY, NULL, TRUE)).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(new Or(EMPTY, TRUE, NULL)).canonical()); + assertEquals(NULL, new ConstantFolding().rule(new Or(EMPTY, NULL, FALSE)).canonical()); + assertEquals(NULL, new ConstantFolding().rule(new Or(EMPTY, FALSE, NULL)).canonical()); assertEquals(NULL, new ConstantFolding().rule(new Or(EMPTY, NULL, NULL)).canonical()); } @@ -321,25 +324,25 @@ public class OptimizerTests extends ESTestCase { } public void testConstantIsNotNull() { - assertEquals(Literal.FALSE, new ConstantFolding().rule(new IsNotNull(EMPTY, L(null)))); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new IsNotNull(EMPTY, FIVE))); + assertEquals(FALSE, new ConstantFolding().rule(new IsNotNull(EMPTY, L(null)))); + assertEquals(TRUE, new ConstantFolding().rule(new IsNotNull(EMPTY, FIVE))); } public void testConstantNot() { - assertEquals(Literal.FALSE, new 
ConstantFolding().rule(new Not(EMPTY, Literal.TRUE))); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new Not(EMPTY, Literal.FALSE))); + assertEquals(FALSE, new ConstantFolding().rule(new Not(EMPTY, TRUE))); + assertEquals(TRUE, new ConstantFolding().rule(new Not(EMPTY, FALSE))); } public void testConstantFoldingLikes() { - assertEquals(Literal.TRUE, - new ConstantFolding().rule(new Like(EMPTY, Literal.of(EMPTY, "test_emp"), new LikePattern("test%", (char) 0))) + assertEquals(TRUE, + new ConstantFolding().rule(new Like(EMPTY, of(EMPTY, "test_emp"), new LikePattern("test%", (char) 0))) .canonical()); - assertEquals(Literal.TRUE, - new ConstantFolding().rule(new RLike(EMPTY, Literal.of(EMPTY, "test_emp"), "test.emp")).canonical()); + assertEquals(TRUE, + new ConstantFolding().rule(new RLike(EMPTY, of(EMPTY, "test_emp"), "test.emp")).canonical()); } public void testConstantFoldingDatetime() { - Expression cast = new Cast(EMPTY, Literal.of(EMPTY, "2018-01-19T10:23:27Z"), DataType.DATETIME); + Expression cast = new Cast(EMPTY, of(EMPTY, "2018-01-19T10:23:27Z"), DataType.DATETIME); assertEquals(2018, foldFunction(new Year(EMPTY, cast, UTC))); assertEquals(1, foldFunction(new MonthOfYear(EMPTY, cast, UTC))); assertEquals(19, foldFunction(new DayOfMonth(EMPTY, cast, UTC))); @@ -407,44 +410,44 @@ public class OptimizerTests extends ESTestCase { public void testNullFoldingIsNull() { FoldNull foldNull = new FoldNull(); - assertEquals(true, foldNull.rule(new IsNull(EMPTY, Literal.NULL)).fold()); - assertEquals(false, foldNull.rule(new IsNull(EMPTY, Literal.TRUE)).fold()); + assertEquals(true, foldNull.rule(new IsNull(EMPTY, NULL)).fold()); + assertEquals(false, foldNull.rule(new IsNull(EMPTY, TRUE)).fold()); } public void testNullFoldingIsNotNull() { FoldNull foldNull = new FoldNull(); - assertEquals(true, foldNull.rule(new IsNotNull(EMPTY, Literal.TRUE)).fold()); - assertEquals(false, foldNull.rule(new IsNotNull(EMPTY, Literal.NULL)).fold()); + assertEquals(true, foldNull.rule(new IsNotNull(EMPTY, TRUE)).fold()); + assertEquals(false, foldNull.rule(new IsNotNull(EMPTY, NULL)).fold()); } public void testGenericNullableExpression() { FoldNull rule = new FoldNull(); // date-time - assertNullLiteral(rule.rule(new DayName(EMPTY, Literal.NULL, randomZone()))); + assertNullLiteral(rule.rule(new DayName(EMPTY, NULL, randomZone()))); // math function - assertNullLiteral(rule.rule(new Cos(EMPTY, Literal.NULL))); + assertNullLiteral(rule.rule(new Cos(EMPTY, NULL))); // string function - assertNullLiteral(rule.rule(new Ascii(EMPTY, Literal.NULL))); - assertNullLiteral(rule.rule(new Repeat(EMPTY, getFieldAttribute(), Literal.NULL))); + assertNullLiteral(rule.rule(new Ascii(EMPTY, NULL))); + assertNullLiteral(rule.rule(new Repeat(EMPTY, getFieldAttribute(), NULL))); // arithmetic - assertNullLiteral(rule.rule(new Add(EMPTY, getFieldAttribute(), Literal.NULL))); + assertNullLiteral(rule.rule(new Add(EMPTY, getFieldAttribute(), NULL))); // comparison - assertNullLiteral(rule.rule(new GreaterThan(EMPTY, getFieldAttribute(), Literal.NULL))); + assertNullLiteral(rule.rule(new GreaterThan(EMPTY, getFieldAttribute(), NULL))); // regex - assertNullLiteral(rule.rule(new RLike(EMPTY, Literal.NULL, "123"))); + assertNullLiteral(rule.rule(new RLike(EMPTY, NULL, "123"))); } public void testNullFoldingDoesNotApplyOnLogicalExpressions() { FoldNull rule = new FoldNull(); - Or or = new Or(EMPTY, Literal.NULL, Literal.TRUE); + Or or = new Or(EMPTY, NULL, TRUE); assertEquals(or, rule.rule(or)); - or = new Or(EMPTY, 
Literal.NULL, Literal.NULL); + or = new Or(EMPTY, NULL, NULL); assertEquals(or, rule.rule(or)); - And and = new And(EMPTY, Literal.NULL, Literal.TRUE); + And and = new And(EMPTY, NULL, TRUE); assertEquals(and, rule.rule(and)); - and = new And(EMPTY, Literal.NULL, Literal.NULL); + and = new And(EMPTY, NULL, NULL); assertEquals(and, rule.rule(and)); } @@ -455,11 +458,11 @@ public class OptimizerTests extends ESTestCase { Class clazz = (Class) randomFrom(IfNull.class, NullIf.class); Constructor ctor = clazz.getConstructor(Source.class, Expression.class, Expression.class); - ConditionalFunction conditionalFunction = ctor.newInstance(EMPTY, Literal.NULL, ONE); + ConditionalFunction conditionalFunction = ctor.newInstance(EMPTY, NULL, ONE); assertEquals(conditionalFunction, rule.rule(conditionalFunction)); - conditionalFunction = ctor.newInstance(EMPTY, ONE, Literal.NULL); + conditionalFunction = ctor.newInstance(EMPTY, ONE, NULL); assertEquals(conditionalFunction, rule.rule(conditionalFunction)); - conditionalFunction = ctor.newInstance(EMPTY, Literal.NULL, Literal.NULL); + conditionalFunction = ctor.newInstance(EMPTY, NULL, NULL); assertEquals(conditionalFunction, rule.rule(conditionalFunction)); } @@ -470,14 +473,14 @@ public class OptimizerTests extends ESTestCase { Class clazz = (Class) randomFrom(Coalesce.class, Greatest.class, Least.class); Constructor ctor = clazz.getConstructor(Source.class, List.class); - ArbitraryConditionalFunction conditionalFunction = ctor.newInstance(EMPTY, Arrays.asList(Literal.NULL, ONE, TWO)); + ArbitraryConditionalFunction conditionalFunction = ctor.newInstance(EMPTY, Arrays.asList(NULL, ONE, TWO)); assertEquals(conditionalFunction, rule.rule(conditionalFunction)); - conditionalFunction = ctor.newInstance(EMPTY, Arrays.asList(Literal.NULL, NULL, NULL)); + conditionalFunction = ctor.newInstance(EMPTY, Arrays.asList(NULL, NULL, NULL)); assertEquals(conditionalFunction, rule.rule(conditionalFunction)); } public void testSimplifyCoalesceNulls() { - Expression e = new SimplifyConditional().rule(new Coalesce(EMPTY, asList(Literal.NULL, Literal.NULL))); + Expression e = new SimplifyConditional().rule(new Coalesce(EMPTY, asList(NULL, NULL))); assertEquals(Coalesce.class, e.getClass()); assertEquals(0, e.children().size()); } @@ -491,51 +494,51 @@ public class OptimizerTests extends ESTestCase { public void testSimplifyCoalesceRandomNullsWithValue() { Expression e = new SimplifyConditional().rule(new Coalesce(EMPTY, CollectionUtils.combine( - CollectionUtils.combine(randomListOfNulls(), Literal.TRUE, Literal.FALSE, Literal.TRUE), + CollectionUtils.combine(randomListOfNulls(), TRUE, FALSE, TRUE), randomListOfNulls()))); assertEquals(1, e.children().size()); - assertEquals(Literal.TRUE, e.children().get(0)); + assertEquals(TRUE, e.children().get(0)); } private List randomListOfNulls() { - return asList(randomArray(1, 10, Literal[]::new, () -> Literal.NULL)); + return asList(randomArray(1, 10, Literal[]::new, () -> NULL)); } public void testSimplifyCoalesceFirstLiteral() { Expression e = new SimplifyConditional() .rule(new Coalesce(EMPTY, - Arrays.asList(Literal.NULL, Literal.TRUE, Literal.FALSE, new Abs(EMPTY, getFieldAttribute())))); + Arrays.asList(NULL, TRUE, FALSE, new Abs(EMPTY, getFieldAttribute())))); assertEquals(Coalesce.class, e.getClass()); assertEquals(1, e.children().size()); - assertEquals(Literal.TRUE, e.children().get(0)); + assertEquals(TRUE, e.children().get(0)); } public void testSimplifyIfNullNulls() { - Expression e = new 
SimplifyConditional().rule(new IfNull(EMPTY, Literal.NULL, Literal.NULL)); + Expression e = new SimplifyConditional().rule(new IfNull(EMPTY, NULL, NULL)); assertEquals(IfNull.class, e.getClass()); assertEquals(0, e.children().size()); } public void testSimplifyIfNullWithNullAndValue() { - Expression e = new SimplifyConditional().rule(new IfNull(EMPTY, Literal.NULL, ONE)); + Expression e = new SimplifyConditional().rule(new IfNull(EMPTY, NULL, ONE)); assertEquals(IfNull.class, e.getClass()); assertEquals(1, e.children().size()); assertEquals(ONE, e.children().get(0)); - e = new SimplifyConditional().rule(new IfNull(EMPTY, ONE, Literal.NULL)); + e = new SimplifyConditional().rule(new IfNull(EMPTY, ONE, NULL)); assertEquals(IfNull.class, e.getClass()); assertEquals(1, e.children().size()); assertEquals(ONE, e.children().get(0)); } public void testFoldNullNotAppliedOnNullIf() { - Expression orig = new NullIf(EMPTY, ONE, Literal.NULL); + Expression orig = new NullIf(EMPTY, ONE, NULL); Expression f = new FoldNull().rule(orig); assertEquals(orig, f); } public void testSimplifyGreatestNulls() { - Expression e = new SimplifyConditional().rule(new Greatest(EMPTY, asList(Literal.NULL, Literal.NULL))); + Expression e = new SimplifyConditional().rule(new Greatest(EMPTY, asList(NULL, NULL))); assertEquals(Greatest.class, e.getClass()); assertEquals(0, e.children().size()); } @@ -556,7 +559,7 @@ public class OptimizerTests extends ESTestCase { } public void testSimplifyLeastNulls() { - Expression e = new SimplifyConditional().rule(new Least(EMPTY, asList(Literal.NULL, Literal.NULL))); + Expression e = new SimplifyConditional().rule(new Least(EMPTY, asList(NULL, NULL))); assertEquals(Least.class, e.getClass()); assertEquals(0, e.children().size()); } @@ -578,9 +581,9 @@ public class OptimizerTests extends ESTestCase { public void testConcatFoldingIsNotNull() { FoldNull foldNull = new FoldNull(); - assertEquals(1, foldNull.rule(new Concat(EMPTY, Literal.NULL, ONE)).fold()); - assertEquals(1, foldNull.rule(new Concat(EMPTY, ONE, Literal.NULL)).fold()); - assertEquals(StringUtils.EMPTY, foldNull.rule(new Concat(EMPTY, Literal.NULL, Literal.NULL)).fold()); + assertEquals(1, foldNull.rule(new Concat(EMPTY, NULL, ONE)).fold()); + assertEquals(1, foldNull.rule(new Concat(EMPTY, ONE, NULL)).fold()); + assertEquals(StringUtils.EMPTY, foldNull.rule(new Concat(EMPTY, NULL, NULL)).fold()); } // @@ -593,15 +596,15 @@ public class OptimizerTests extends ESTestCase { } public void testBinaryComparisonSimplification() { - assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new Equals(EMPTY, FIVE, FIVE))); - assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new NullEquals(EMPTY, FIVE, FIVE))); - assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new NullEquals(EMPTY, NULL, NULL))); - assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new NotEquals(EMPTY, FIVE, FIVE))); - assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new GreaterThanOrEqual(EMPTY, FIVE, FIVE))); - assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new LessThanOrEqual(EMPTY, FIVE, FIVE))); + assertEquals(TRUE, new BinaryComparisonSimplification().rule(new Equals(EMPTY, FIVE, FIVE))); + assertEquals(TRUE, new BinaryComparisonSimplification().rule(new NullEquals(EMPTY, FIVE, FIVE))); + assertEquals(TRUE, new BinaryComparisonSimplification().rule(new NullEquals(EMPTY, NULL, NULL))); + assertEquals(FALSE, new BinaryComparisonSimplification().rule(new 
NotEquals(EMPTY, FIVE, FIVE))); + assertEquals(TRUE, new BinaryComparisonSimplification().rule(new GreaterThanOrEqual(EMPTY, FIVE, FIVE))); + assertEquals(TRUE, new BinaryComparisonSimplification().rule(new LessThanOrEqual(EMPTY, FIVE, FIVE))); - assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new GreaterThan(EMPTY, FIVE, FIVE))); - assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new LessThan(EMPTY, FIVE, FIVE))); + assertEquals(FALSE, new BinaryComparisonSimplification().rule(new GreaterThan(EMPTY, FIVE, FIVE))); + assertEquals(FALSE, new BinaryComparisonSimplification().rule(new LessThan(EMPTY, FIVE, FIVE))); } public void testNullEqualsWithNullLiteralBecomesIsNull() { @@ -648,25 +651,25 @@ public class OptimizerTests extends ESTestCase { public void testBoolSimplifyOr() { BooleanSimplification simplification = new BooleanSimplification(); - assertEquals(Literal.TRUE, simplification.rule(new Or(EMPTY, Literal.TRUE, Literal.TRUE))); - assertEquals(Literal.TRUE, simplification.rule(new Or(EMPTY, Literal.TRUE, DUMMY_EXPRESSION))); - assertEquals(Literal.TRUE, simplification.rule(new Or(EMPTY, DUMMY_EXPRESSION, Literal.TRUE))); + assertEquals(TRUE, simplification.rule(new Or(EMPTY, TRUE, TRUE))); + assertEquals(TRUE, simplification.rule(new Or(EMPTY, TRUE, DUMMY_EXPRESSION))); + assertEquals(TRUE, simplification.rule(new Or(EMPTY, DUMMY_EXPRESSION, TRUE))); - assertEquals(Literal.FALSE, simplification.rule(new Or(EMPTY, Literal.FALSE, Literal.FALSE))); - assertEquals(DUMMY_EXPRESSION, simplification.rule(new Or(EMPTY, Literal.FALSE, DUMMY_EXPRESSION))); - assertEquals(DUMMY_EXPRESSION, simplification.rule(new Or(EMPTY, DUMMY_EXPRESSION, Literal.FALSE))); + assertEquals(FALSE, simplification.rule(new Or(EMPTY, FALSE, FALSE))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new Or(EMPTY, FALSE, DUMMY_EXPRESSION))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new Or(EMPTY, DUMMY_EXPRESSION, FALSE))); } public void testBoolSimplifyAnd() { BooleanSimplification simplification = new BooleanSimplification(); - assertEquals(Literal.TRUE, simplification.rule(new And(EMPTY, Literal.TRUE, Literal.TRUE))); - assertEquals(DUMMY_EXPRESSION, simplification.rule(new And(EMPTY, Literal.TRUE, DUMMY_EXPRESSION))); - assertEquals(DUMMY_EXPRESSION, simplification.rule(new And(EMPTY, DUMMY_EXPRESSION, Literal.TRUE))); + assertEquals(TRUE, simplification.rule(new And(EMPTY, TRUE, TRUE))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new And(EMPTY, TRUE, DUMMY_EXPRESSION))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new And(EMPTY, DUMMY_EXPRESSION, TRUE))); - assertEquals(Literal.FALSE, simplification.rule(new And(EMPTY, Literal.FALSE, Literal.FALSE))); - assertEquals(Literal.FALSE, simplification.rule(new And(EMPTY, Literal.FALSE, DUMMY_EXPRESSION))); - assertEquals(Literal.FALSE, simplification.rule(new And(EMPTY, DUMMY_EXPRESSION, Literal.FALSE))); + assertEquals(FALSE, simplification.rule(new And(EMPTY, FALSE, FALSE))); + assertEquals(FALSE, simplification.rule(new And(EMPTY, FALSE, DUMMY_EXPRESSION))); + assertEquals(FALSE, simplification.rule(new And(EMPTY, DUMMY_EXPRESSION, FALSE))); } public void testBoolCommonFactorExtraction() { @@ -710,7 +713,7 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsNotComparable() { FieldAttribute fa = getFieldAttribute(); LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, SIX); - LessThan lt = new LessThan(EMPTY, fa, Literal.FALSE); + LessThan lt = new 
LessThan(EMPTY, fa, FALSE); CombineBinaryComparisons rule = new CombineBinaryComparisons(); And and = new And(EMPTY, lte, lt); @@ -790,7 +793,7 @@ public class OptimizerTests extends ESTestCase { CombineBinaryComparisons rule = new CombineBinaryComparisons(); // TRUE AND a != 5 AND 4 < a <= 7 - Expression exp = rule.rule(new And(EMPTY, gte, new And(EMPTY, Literal.TRUE, new And(EMPTY, gt, new And(EMPTY, ne, lte))))); + Expression exp = rule.rule(new And(EMPTY, gte, new And(EMPTY, TRUE, new And(EMPTY, gt, new And(EMPTY, ne, lte))))); assertEquals(And.class, exp.getClass()); And and = ((And) exp); assertEquals(Range.class, and.right().getClass()); @@ -943,7 +946,7 @@ public class OptimizerTests extends ESTestCase { FieldAttribute fa = getFieldAttribute(); GreaterThan gt1 = new GreaterThan(EMPTY, fa, ONE); - GreaterThan gt2 = new GreaterThan(EMPTY, fa, Literal.FALSE); + GreaterThan gt2 = new GreaterThan(EMPTY, fa, FALSE); Or or = new Or(EMPTY, gt1, gt2); @@ -1056,7 +1059,7 @@ public class OptimizerTests extends ESTestCase { FieldAttribute fa = getFieldAttribute(); Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false); - Range r2 = new Range(EMPTY, fa, ONE, false, Literal.FALSE, false); + Range r2 = new Range(EMPTY, fa, ONE, false, FALSE, false); Or or = new Or(EMPTY, r1, r2); @@ -1194,7 +1197,7 @@ public class OptimizerTests extends ESTestCase { PropagateEquals rule = new PropagateEquals(); Expression exp = rule.rule(new And(EMPTY, eq1, eq2)); - assertEquals(Literal.FALSE, rule.rule(exp)); + assertEquals(FALSE, rule.rule(exp)); } // a <=> 1 AND a <=> 2 -> FALSE @@ -1205,7 +1208,7 @@ public class OptimizerTests extends ESTestCase { PropagateEquals rule = new PropagateEquals(); Expression exp = rule.rule(new And(EMPTY, eq1, eq2)); - assertEquals(Literal.FALSE, rule.rule(exp)); + assertEquals(FALSE, rule.rule(exp)); } // 1 < a < 10 AND a == 10 -> FALSE @@ -1216,7 +1219,7 @@ public class OptimizerTests extends ESTestCase { PropagateEquals rule = new PropagateEquals(); Expression exp = rule.rule(new And(EMPTY, eq1, r)); - assertEquals(Literal.FALSE, rule.rule(exp)); + assertEquals(FALSE, rule.rule(exp)); } // 1 < a < 10 AND a <=> 10 -> FALSE @@ -1227,7 +1230,7 @@ public class OptimizerTests extends ESTestCase { PropagateEquals rule = new PropagateEquals(); Expression exp = rule.rule(new And(EMPTY, eq1, r)); - assertEquals(Literal.FALSE, rule.rule(exp)); + assertEquals(FALSE, rule.rule(exp)); } public void testTranslateMinToFirst() { From 5990c54801e7508aca42844ac69b71f1055784cb Mon Sep 17 00:00:00 2001 From: Shaunak Kashyap Date: Mon, 8 Apr 2019 09:31:26 -0700 Subject: [PATCH 36/45] [7.x] Docs: Simplifying setup by using module configuration variant syntax (#40879) (#40974) Backports the following commits to 7.x: - Docs: Simplifying setup by using module configuration variant syntax (#40879) --- .../configuring-metricbeat.asciidoc | 41 ++++--------------- 1 file changed, 9 insertions(+), 32 deletions(-) diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index a161559d3f1..f41e02b0095 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -81,7 +81,7 @@ run the following command: ["source","sh",subs="attributes,callouts"] ---------------------------------------------------------------------- -metricbeat modules enable elasticsearch +metricbeat modules enable elasticsearch-xpack 
---------------------------------------------------------------------- For more information, see @@ -89,45 +89,22 @@ For more information, see {metricbeat-ref}/metricbeat-module-elasticsearch.html[{es} module]. -- -.. Configure the {es} module in {metricbeat}. + -+ --- -You must specify the following settings in the `modules.d/elasticsearch.yml` file: +.. By default the module will collect {es} monitoring metrics from `http://localhost:9200`. +If the local {es} node has a different address, you must specify it via the `hosts` setting +in the `modules.d/elasticsearch-xpack.yml` file. -[source,yaml] ----------------------------------- -- module: elasticsearch - metricsets: - - ccr - - cluster_stats - - index - - index_recovery - - index_summary - - ml_job - - node_stats - - shard - period: 10s - hosts: ["http://localhost:9200"] <1> - xpack.enabled: true <2> ----------------------------------- -<1> This setting identifies the host and port number that are used to access {es}. -<2> This setting ensures that {kib} can read this monitoring data successfully. -That is to say, it's stored in the same location and format as monitoring data -that is sent by <>. --- - -.. If Elastic {security-features} are enabled, you must also provide a user ID +.. If Elastic {security-features} are enabled, you must also provide a user ID and password so that {metricbeat} can collect metrics successfully. -... Create a user on the production cluster that has the +... Create a user on the production cluster that has the {stack-ov}/built-in-roles.html[`remote_monitoring_collector` built-in role]. Alternatively, use the {stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. -... Add the `username` and `password` settings to the {es} module configuration +... Add the `username` and `password` settings to the {es} module configuration file. + -- -For example, add the following settings in the `modules.d/elasticsearch.yml` file: +For example, add the following settings in the `modules.d/elasticsearch-xpack.yml` file: [source,yaml] ---------------------------------- @@ -140,7 +117,7 @@ For example, add the following settings in the `modules.d/elasticsearch.yml` fil .. If you configured {es} to use <>, you must access it via HTTPS. For example, use a `hosts` setting like -`https://localhost:9200` in the `modules.d/elasticsearch.yml` file. +`https://localhost:9200` in the `modules.d/elasticsearch-xpack.yml` file. .. Identify where to send the monitoring data. + + From efe45011a57ed36cf8405af1cb6d5e1b23918645 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 8 Apr 2019 11:36:29 -0700 Subject: [PATCH 37/45] [DOCS] Fix deprecation notification (#40980) --- docs/java-api/query-dsl/type-query.asciidoc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/java-api/query-dsl/type-query.asciidoc b/docs/java-api/query-dsl/type-query.asciidoc index 93c7bd76dfe..160deedb9ea 100644 --- a/docs/java-api/query-dsl/type-query.asciidoc +++ b/docs/java-api/query-dsl/type-query.asciidoc @@ -1,7 +1,10 @@ [[java-query-dsl-type-query]] ==== Type Query -deprecated[7.0.0, Types are being removed, prefer filtering on a field instead. For more information, please see {ref}/removal-of-types.html[Removal of mapping types].] +deprecated[7.0.0] + +Types are being removed, prefer filtering on a field instead. For +more information, see {ref}/removal-of-types.html[Removal of mapping types]. 
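A minimal sketch of the suggested replacement, assuming a user-defined `doc_type` keyword field; the field name and value below are illustrative rather than part of the API, and the snippet needs the Elasticsearch Java client on the classpath:

[source,java]
--------------------------------------------------
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class TypeFilterExample {
    public static void main(String[] args) {
        // Deprecated: match documents by mapping type.
        QueryBuilder legacy = QueryBuilders.typeQuery("question");

        // Preferred: index a discriminator field of your own (here "doc_type")
        // and filter on it with a regular term query.
        QueryBuilder replacement = QueryBuilders.termQuery("doc_type", "question");

        System.out.println(legacy);
        System.out.println(replacement);
    }
}
--------------------------------------------------

Documents then carry their own discriminator value at index time, so the query no longer depends on mapping types.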
See {ref}/query-dsl-type-query.html[Type Query] From 335955b8742ee256914193092cfdc6687afd19c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 8 Apr 2019 20:47:53 +0200 Subject: [PATCH 38/45] Some internal refactorings in AnalysisRegistry (#40609) Reducing some methods scope and marking them as static where possible. Removing "alias" support from AnalysisRegistry#produceAnalyze and changing that method to return a NamedAnalyzer instead of having a side effect on the analyzer map passed in. Also, CustomAnalyzerProvider doesn't seem to need the `environment` field. --- .../index/analysis/AnalysisRegistry.java | 34 +++++++------------ .../analysis/CustomAnalyzerProvider.java | 7 ++-- .../indices/analysis/AnalysisModuleTests.java | 13 ------- 3 files changed, 15 insertions(+), 39 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 61b1cb057fd..b0d9c778d2a 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -293,13 +293,13 @@ public final class AnalysisRegistry implements Closeable { T factory = null; if (typeName == null) { if (currentSettings.get("tokenizer") != null) { - factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings, environment); + factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings); } else { throw new IllegalArgumentException(component + " [" + name + "] " + "must specify either an analyzer type, or a tokenizer"); } } else if (typeName.equals("custom")) { - factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings, environment); + factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings); } if (factory != null) { factories.put(name, factory); @@ -430,8 +430,10 @@ public final class AnalysisRegistry implements Closeable { Map normalizers = new HashMap<>(); Map whitespaceNormalizers = new HashMap<>(); for (Map.Entry> entry : analyzerProviders.entrySet()) { - processAnalyzerFactory(indexSettings, entry.getKey(), entry.getValue(), analyzers, - tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); + analyzers.merge(entry.getKey(), produceAnalyzer(entry.getKey(), entry.getValue(), tokenFilterFactoryFactories, + charFilterFactoryFactories, tokenizerFactoryFactories), (k, v) -> { + throw new IllegalStateException("already registered analyzer with name: " + entry.getKey()); + }); } for (Map.Entry> entry : normalizerProviders.entrySet()) { processNormalizerFactory(entry.getKey(), entry.getValue(), normalizers, "keyword", @@ -441,9 +443,9 @@ public final class AnalysisRegistry implements Closeable { } if (!analyzers.containsKey("default")) { - processAnalyzerFactory(indexSettings, "default", new StandardAnalyzerProvider(indexSettings, null, - "default", Settings.Builder.EMPTY_SETTINGS), - analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); + NamedAnalyzer defaultAnalyzer = produceAnalyzer("default", new StandardAnalyzerProvider(indexSettings, null, "default", + Settings.Builder.EMPTY_SETTINGS), tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); + analyzers.put("default", defaultAnalyzer); } if (!analyzers.containsKey("default_search")) { analyzers.put("default_search", analyzers.get("default")); @@ -473,11 +475,9 @@ public final class 
AnalysisRegistry implements Closeable { whitespaceNormalizers); } - private void processAnalyzerFactory(IndexSettings indexSettings, - String name, - AnalyzerProvider analyzerFactory, - Map analyzers, Map tokenFilters, - Map charFilters, Map tokenizers) { + private static NamedAnalyzer produceAnalyzer(String name, AnalyzerProvider analyzerFactory, + Map tokenFilters, Map charFilters, + Map tokenizers) { /* * Lucene defaults positionIncrementGap to 0 in all analyzers but * Elasticsearch defaults them to 0 only before version 2.0 @@ -511,15 +511,7 @@ public final class AnalysisRegistry implements Closeable { } else { analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap); } - if (analyzers.containsKey(name)) { - throw new IllegalStateException("already registered analyzer with name: " + name); - } - analyzers.put(name, analyzer); - // TODO: remove alias support completely when we no longer support pre 5.0 indices - final String analyzerAliasKey = "index.analysis.analyzer." + analyzerFactory.name() + ".alias"; - if (indexSettings.getSettings().get(analyzerAliasKey) != null) { - throw new IllegalArgumentException("setting [" + analyzerAliasKey + "] is not supported"); - } + return analyzer; } private void processNormalizerFactory( diff --git a/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java b/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java index 96fe9454f63..8080a6af876 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.analysis; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.TextFieldMapper; @@ -35,18 +34,16 @@ import java.util.Map; public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final Settings analyzerSettings; - private final Environment environment; private CustomAnalyzer customAnalyzer; public CustomAnalyzerProvider(IndexSettings indexSettings, - String name, Settings settings, Environment environment) { + String name, Settings settings) { super(indexSettings, name, settings); this.analyzerSettings = settings; - this.environment = environment; } - public void build(final Map tokenizers, final Map charFilters, + void build(final Map tokenizers, final Map charFilters, final Map tokenFilters) { String tokenizerName = analyzerSettings.get("tokenizer"); if (tokenizerName == null) { diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index c769da0af4d..c4cd37a91b1 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -131,19 +131,6 @@ public class AnalysisModuleTests extends ESTestCase { testSimpleConfiguration(settings); } - public void testAnalyzerAliasNotAllowedPost5x() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.foobar.type", "standard") - .put("index.analysis.analyzer.foobar.alias","foobaz") - // analyzer aliases were removed in v5.0.0 alpha6 - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), 
Version.V_6_0_0, null)) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - AnalysisRegistry registry = getNewRegistry(settings); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> getIndexAnalyzers(registry, settings)); - assertEquals("setting [index.analysis.analyzer.foobar.alias] is not supported", e.getMessage()); - } - public void testVersionedAnalyzers() throws Exception { String yaml = "/org/elasticsearch/index/analysis/test1.yml"; Settings settings2 = Settings.builder() From 26d8ecfe07d9d4254e12185d81c2f7a8f5869cef Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 8 Apr 2019 16:10:24 -0400 Subject: [PATCH 39/45] Fix unsafe publication in opt-out query cache (#40957) This opt-out query cache has an unsafe publication issue, where the cache is exposed to another thread (namely the cluster state update thread) before the constructor has finished execution. This exposes the opt-out query cache to concurrency bugs. This commit addresses this by ensuring that the opt-out query cache is not registered as a listener for license state changes until after the constructor has returned. --- .../xpack/security/Security.java | 16 ++++++++---- .../authz/accesscontrol/OptOutQueryCache.java | 26 ++++++++++++++++--- .../accesscontrol/OptOutQueryCacheTests.java | 4 +++ 3 files changed, 38 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 7b7e72fdd6b..f5c1aa81908 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -691,12 +691,18 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw indexService.cache() != null ? indexService.cache().bitsetFilterCache() : null, indexService.getThreadPool().getThreadContext(), getLicenseState(), indexService.getScriptService())); - /* We need to forcefully overwrite the query cache implementation to use security's opt out query cache implementation. - * This impl. disabled the query cache if field level security is used for a particular request. If we wouldn't do - * forcefully overwrite the query cache implementation then we leave the system vulnerable to leakages of data to - * unauthorized users. */ + /* + * We need to forcefully overwrite the query cache implementation to use security's opt-out query cache implementation. This + * implementation disables the query cache if field level security is used for a particular request. We have to forcefully + * overwrite the query cache implementation to prevent data leakage to unauthorized users. 
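To make the publication hazard behind #40957 concrete, here is a minimal, self-contained sketch. The names `SketchLicenseState` and `SketchCache` are hypothetical stand-ins rather than the real Elasticsearch types; the point is only that registering `this` as a listener from inside a constructor hands the object to other threads before construction completes, while a dedicated registration step invoked after the constructor returns does not.

[source,java]
--------------------------------------------------
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class SafePublicationSketch {

    interface Listener {
        void stateChanged();
    }

    /** Hypothetical stand-in for the license state; listeners can be notified from other threads. */
    static class SketchLicenseState {
        private final List<Listener> listeners = new CopyOnWriteArrayList<>();

        void addListener(Listener listener) {
            listeners.add(listener);
        }

        void fireStateChanged() {
            listeners.forEach(Listener::stateChanged);
        }
    }

    /** Hypothetical stand-in for the opt-out query cache. */
    static class SketchCache implements Listener {
        private final String indexName;

        SketchCache(String indexName) {
            this.indexName = indexName;
            // UNSAFE (the old pattern): calling licenseState.addListener(this) here would let
            // another thread invoke stateChanged() before this constructor finishes and observe
            // a partially constructed object.
        }

        // SAFE (the new pattern): registration is a separate step, invoked only after
        // the constructor has returned.
        void listenForStateChanges(SketchLicenseState licenseState) {
            licenseState.addListener(this);
        }

        @Override
        public void stateChanged() {
            System.out.println("clearing cache for index " + indexName);
        }
    }

    public static void main(String[] args) {
        SketchLicenseState licenseState = new SketchLicenseState();
        SketchCache cache = new SketchCache("my-index"); // construct first...
        cache.listenForStateChanges(licenseState);       // ...then register as a listener
        licenseState.fireStateChanged();
    }
}
--------------------------------------------------

This mirrors the `listenForLicenseStateChanges()` step introduced in the diff below: the cache is constructed first and registered for license state changes afterwards.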
+ */ module.forceQueryCacheProvider( - (settings, cache) -> new OptOutQueryCache(settings, cache, threadContext.get(), getLicenseState())); + (settings, cache) -> { + final OptOutQueryCache queryCache = + new OptOutQueryCache(settings, cache, threadContext.get(), getLicenseState()); + queryCache.listenForLicenseStateChanges(); + return queryCache; + }); } // in order to prevent scroll ids from being maliciously crafted and/or guessed, a listener is added that diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java index 78058080e5b..daadac78ae4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java @@ -24,8 +24,9 @@ import java.util.Objects; import java.util.Set; /** - * Opts out of the query cache if field level security is active for the current request, - * and its unsafe to cache. + * Opts out of the query cache if field level security is active for the current request, and it is unsafe to cache. Note that the method + * {@link #listenForLicenseStateChanges()} must be invoked after construction of the query cache and before any other public methods are + * invoked on this query cache. */ public final class OptOutQueryCache extends AbstractIndexComponent implements LicenseStateListener, QueryCache { @@ -33,6 +34,7 @@ public final class OptOutQueryCache extends AbstractIndexComponent implements Li private final ThreadContext context; private final String indexName; private final XPackLicenseState licenseState; + private volatile boolean licenseStateListenerRegistered; public OptOutQueryCache( final IndexSettings indexSettings, @@ -44,28 +46,46 @@ public final class OptOutQueryCache extends AbstractIndexComponent implements Li this.context = Objects.requireNonNull(context, "threadContext must not be null"); this.indexName = indexSettings.getIndex().getName(); this.licenseState = Objects.requireNonNull(licenseState, "licenseState"); + } + + /** + * Register this query cache to listen for license state changes. This must be done after construction of this query cache before any + * other public methods are invoked on this query cache. + */ + public void listenForLicenseStateChanges() { + /* + * Registering this as a listener can not be done in the constructor because otherwise it would be unsafe publication of this. That + * is, it would expose this to another thread before the constructor had finished. Therefore, we have a dedicated method to register + * the listener that is invoked after the constructor has returned. 
+ */ + assert licenseStateListenerRegistered == false; licenseState.addListener(this); + licenseStateListenerRegistered = true; } @Override public void close() throws ElasticsearchException { + assert licenseStateListenerRegistered; licenseState.removeListener(this); clear("close"); } @Override public void licenseStateChanged() { + assert licenseStateListenerRegistered; clear("license state changed"); } @Override - public void clear(String reason) { + public void clear(final String reason) { + assert licenseStateListenerRegistered; logger.debug("full cache clear, reason [{}]", reason); indicesQueryCache.clearIndex(index().getName()); } @Override public Weight doCache(Weight weight, QueryCachingPolicy policy) { + assert licenseStateListenerRegistered; if (licenseState.isAuthAllowed() == false) { logger.debug("not opting out of the query cache; authorization is not allowed"); return indicesQueryCache.doCache(weight, policy); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java index 54dbdc3d33d..99d23cc8b10 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java @@ -136,6 +136,7 @@ public class OptOutQueryCacheTests extends ESTestCase { final XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isAuthAllowed()).thenReturn(false); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + cache.listenForLicenseStateChanges(); final Weight weight = mock(Weight.class); final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); cache.doCache(weight, policy); @@ -154,6 +155,7 @@ public class OptOutQueryCacheTests extends ESTestCase { final XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isAuthAllowed()).thenReturn(true); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + cache.listenForLicenseStateChanges(); final Weight weight = mock(Weight.class); final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); final Weight w = cache.doCache(weight, policy); @@ -178,6 +180,7 @@ public class OptOutQueryCacheTests extends ESTestCase { final XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isAuthAllowed()).thenReturn(true); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + cache.listenForLicenseStateChanges(); final Weight weight = mock(Weight.class); final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); cache.doCache(weight, policy); @@ -195,6 +198,7 @@ public class OptOutQueryCacheTests extends ESTestCase { final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); final XPackLicenseState licenseState = mock(XPackLicenseState.class); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + cache.listenForLicenseStateChanges(); verify(licenseState).addListener(cache); cache.close(); verify(licenseState).removeListener(cache); From 9e271b9ec2e7985e82496912875022afa380d22a Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 8 Apr 
2019 16:11:32 -0400 Subject: [PATCH 40/45] [DOCS] Fix broken link to Elasticsearch Docker source code (#40979) * [DOCS] Fix broken link to Elasticsearch Docker source code * [DOCS] Link to Dockerfile in elastic/elasticsearch repo * [DOCS] Link to Docker source files in elastic/elasticsearch repo --- docs/reference/setup/install/docker.asciidoc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index b64b15703b2..83f80b569a8 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -5,8 +5,9 @@ The images use https://hub.docker.com/_/centos/[centos:7] as the base image. A list of all published Docker images and tags is available at -https://www.docker.elastic.co[www.docker.elastic.co]. The source code is in -https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub]. +https://www.docker.elastic.co[www.docker.elastic.co]. The source files +are in +https://github.com/elastic/elasticsearch/blob/{branch}/distribution/docker[Github]. These images are free to use under the Elastic license. They contain open source and free commercial features and access to paid commercial features. From 1ed4f215acee3ce936c6d9ae4179f932f74679af Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 8 Apr 2019 18:54:29 -0700 Subject: [PATCH 41/45] [DOCS] Add notable-breaking-changes tags (#40991) --- docs/reference/migration/migrate_7_0.asciidoc | 11 +---------- .../migration/migrate_7_0/aggregations.asciidoc | 8 ++++++++ .../reference/migration/migrate_7_0/analysis.asciidoc | 7 +++++++ docs/reference/migration/migrate_7_0/api.asciidoc | 8 ++++++++ docs/reference/migration/migrate_7_0/cluster.asciidoc | 7 +++++++ .../migration/migrate_7_0/discovery.asciidoc | 7 +++++++ docs/reference/migration/migrate_7_0/indices.asciidoc | 7 +++++++ docs/reference/migration/migrate_7_0/java.asciidoc | 7 +++++++ .../migration/migrate_7_0/java_time.asciidoc | 7 +++++++ docs/reference/migration/migrate_7_0/logging.asciidoc | 7 +++++++ .../migrate_7_0/low_level_restclient.asciidoc | 7 +++++++ .../reference/migration/migrate_7_0/mappings.asciidoc | 7 +++++++ docs/reference/migration/migrate_7_0/node.asciidoc | 7 +++++++ .../migration/migrate_7_0/packaging.asciidoc | 7 +++++++ docs/reference/migration/migrate_7_0/plugins.asciidoc | 7 +++++++ .../migration/migrate_7_0/restclient.asciidoc | 7 +++++++ .../migration/migrate_7_0/scripting.asciidoc | 8 ++++++++ docs/reference/migration/migrate_7_0/search.asciidoc | 7 +++++++ .../reference/migration/migrate_7_0/settings.asciidoc | 7 +++++++ .../migration/migrate_7_0/snapshotstats.asciidoc | 7 +++++++ .../migration/migrate_7_0/suggesters.asciidoc | 7 +++++++ 21 files changed, 144 insertions(+), 10 deletions(-) diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index 94af19d033f..3d723074e9d 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -9,7 +9,6 @@ your application to Elasticsearch 7.0. See also <> and <>. -* <> * <> * <> * <> @@ -32,13 +31,7 @@ See also <> and <>. * <> [float] -[[breaking_70_notable]] -=== Notable changes -// NOTE: The content in this section is also used in the Installation and Upgrade Guide. -//tag::notable-breaking-changes[] -[float] -==== Indices created before 7.0 +=== Indices created before 7.0 Elasticsearch 7.0 can read indices created in version 6.0 or above. 
An Elasticsearch 7.0 node will not start in the presence of indices created in a @@ -53,8 +46,6 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x. ========================================= -// end::notable-breaking-changes[] - include::migrate_7_0/aggregations.asciidoc[] include::migrate_7_0/analysis.asciidoc[] include::migrate_7_0/cluster.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/aggregations.asciidoc b/docs/reference/migration/migrate_7_0/aggregations.asciidoc index bda2e67fa2e..6fb974a1095 100644 --- a/docs/reference/migration/migrate_7_0/aggregations.asciidoc +++ b/docs/reference/migration/migrate_7_0/aggregations.asciidoc @@ -2,6 +2,14 @@ [[breaking_70_aggregations_changes]] === Aggregations changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + + [float] ==== Deprecated `global_ordinals_hash` and `global_ordinals_low_cardinality` execution hints for terms aggregations have been removed diff --git a/docs/reference/migration/migrate_7_0/analysis.asciidoc b/docs/reference/migration/migrate_7_0/analysis.asciidoc index 88da316ba84..a381a86fe2b 100644 --- a/docs/reference/migration/migrate_7_0/analysis.asciidoc +++ b/docs/reference/migration/migrate_7_0/analysis.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_analysis_changes]] === Analysis changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== Limiting the number of tokens produced by _analyze diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index 2f406ef7877..cdcf6a93e36 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -2,6 +2,14 @@ [[breaking_70_api_changes]] === API changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + + [float] ==== Internal Versioning is no longer supported for optimistic concurrency control diff --git a/docs/reference/migration/migrate_7_0/cluster.asciidoc b/docs/reference/migration/migrate_7_0/cluster.asciidoc index bfe7d5df2d0..c6ad953d1b1 100644 --- a/docs/reference/migration/migrate_7_0/cluster.asciidoc +++ b/docs/reference/migration/migrate_7_0/cluster.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_cluster_changes]] === Cluster changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== `:` is no longer allowed in cluster name diff --git a/docs/reference/migration/migrate_7_0/discovery.asciidoc b/docs/reference/migration/migrate_7_0/discovery.asciidoc index e56fbba426d..dad62b9d46f 100644 --- a/docs/reference/migration/migrate_7_0/discovery.asciidoc +++ b/docs/reference/migration/migrate_7_0/discovery.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_discovery_changes]] === Discovery changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== Cluster bootstrapping is required if discovery is configured diff --git a/docs/reference/migration/migrate_7_0/indices.asciidoc 
b/docs/reference/migration/migrate_7_0/indices.asciidoc index e136390bb28..ce16646c3b1 100644 --- a/docs/reference/migration/migrate_7_0/indices.asciidoc +++ b/docs/reference/migration/migrate_7_0/indices.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_indices_changes]] === Indices changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== Index creation no longer defaults to five shards Previous versions of Elasticsearch defaulted to creating five shards per index. diff --git a/docs/reference/migration/migrate_7_0/java.asciidoc b/docs/reference/migration/migrate_7_0/java.asciidoc index f34b1c6ca99..2d3a2101f6b 100644 --- a/docs/reference/migration/migrate_7_0/java.asciidoc +++ b/docs/reference/migration/migrate_7_0/java.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_java_changes]] === Java API changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== `isShardsAcked` deprecated in `6.2` has been removed diff --git a/docs/reference/migration/migrate_7_0/java_time.asciidoc b/docs/reference/migration/migrate_7_0/java_time.asciidoc index 8decba81b70..80a3ab0cf9a 100644 --- a/docs/reference/migration/migrate_7_0/java_time.asciidoc +++ b/docs/reference/migration/migrate_7_0/java_time.asciidoc @@ -1,3 +1,10 @@ +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] [[breaking_70_java_time_changes]] === Replacing Joda-Time with java time diff --git a/docs/reference/migration/migrate_7_0/logging.asciidoc b/docs/reference/migration/migrate_7_0/logging.asciidoc index 1329def9a18..ea9173c7939 100644 --- a/docs/reference/migration/migrate_7_0/logging.asciidoc +++ b/docs/reference/migration/migrate_7_0/logging.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_logging_changes]] === Logging changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== New JSON format log files in `log` directory diff --git a/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc index ae7524f324f..e3132b59680 100644 --- a/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc +++ b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_low_level_restclient_changes]] === Low-level REST client changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== Support for `maxRetryTimeout` removed from RestClient diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc index bfe80cf100c..422c649e33b 100644 --- a/docs/reference/migration/migrate_7_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_mappings_changes]] === Mapping changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// 
end::notable-breaking-changes[] + [float] ==== The `_all` meta field is removed diff --git a/docs/reference/migration/migrate_7_0/node.asciidoc b/docs/reference/migration/migrate_7_0/node.asciidoc index 3b8a9d84e76..a90366189ec 100644 --- a/docs/reference/migration/migrate_7_0/node.asciidoc +++ b/docs/reference/migration/migrate_7_0/node.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_node_changes]] === Node start up +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== Nodes with left-behind data or metadata refuse to start Repurposing an existing node by changing node.master or node.data to false can leave lingering on-disk metadata and diff --git a/docs/reference/migration/migrate_7_0/packaging.asciidoc b/docs/reference/migration/migrate_7_0/packaging.asciidoc index b3bf1823736..db5df312356 100644 --- a/docs/reference/migration/migrate_7_0/packaging.asciidoc +++ b/docs/reference/migration/migrate_7_0/packaging.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_packaging_changes]] === Packaging changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] [[systemd-service-file-config]] ==== systemd service file is no longer configuration diff --git a/docs/reference/migration/migrate_7_0/plugins.asciidoc b/docs/reference/migration/migrate_7_0/plugins.asciidoc index a0e89469fd5..ecae1280c55 100644 --- a/docs/reference/migration/migrate_7_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_7_0/plugins.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_plugins_changes]] === Plugins changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== Azure Repository plugin diff --git a/docs/reference/migration/migrate_7_0/restclient.asciidoc b/docs/reference/migration/migrate_7_0/restclient.asciidoc index d4f99369121..9dff47f5737 100644 --- a/docs/reference/migration/migrate_7_0/restclient.asciidoc +++ b/docs/reference/migration/migrate_7_0/restclient.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_restclient_changes]] === High-level REST client changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== API methods accepting `Header` argument have been removed diff --git a/docs/reference/migration/migrate_7_0/scripting.asciidoc b/docs/reference/migration/migrate_7_0/scripting.asciidoc index 99afca91e01..ccbf3d38a92 100644 --- a/docs/reference/migration/migrate_7_0/scripting.asciidoc +++ b/docs/reference/migration/migrate_7_0/scripting.asciidoc @@ -2,6 +2,14 @@ [[breaking_70_scripting_changes]] === Scripting changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + + [float] ==== getDate() and getDates() removed diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index f2c3d4d397a..f0591408964 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_search_changes]] === Search 
and Query DSL changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== Off-heap terms index diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index 0c8d26aae9d..f2488092c85 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_settings_changes]] === Settings changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== The default for `node.name` is now the hostname diff --git a/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc b/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc index 2098eb3574c..bea15fccb9e 100644 --- a/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc +++ b/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_snapshotstats_changes]] === Snapshot stats changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + Snapshot stats details are provided in a new structured way: * `total` section for all the files that are referenced by the snapshot. diff --git a/docs/reference/migration/migrate_7_0/suggesters.asciidoc b/docs/reference/migration/migrate_7_0/suggesters.asciidoc index 1ae0d6f2c78..213958382aa 100644 --- a/docs/reference/migration/migrate_7_0/suggesters.asciidoc +++ b/docs/reference/migration/migrate_7_0/suggesters.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_suggesters_changes]] === Suggesters changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== Registration of suggesters in plugins has changed From a9416708d00762862636bf3b8e4a117cae4eb357 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 7 Apr 2019 13:32:54 -0400 Subject: [PATCH 42/45] Ensure relocation occurs in testRelocationWithConcurrentIndexing (#40801) If the relocation is throttled, the subsequent search request on the target node (i.e., with preference _only_nodes=target_node) will fail because some shards have not moved to that node yet. With this change, we wait for the relocation to happen by busily checking the routing table of the test index on the target node. 
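The waiting logic is easier to read outside the flattened diff below: this is essentially the assertBusy block the patch adds to testRelocationWithConcurrentIndexing, with the generic type parameters, which the flattened diff drops, reinstated as an assumption:

    // Wait until shard 0 of the index is assigned to the relocation target before
    // asserting green health; a throttled relocation could otherwise still be pending.
    assertBusy(() -> {
        Map<String, Object> state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state")));
        String xpath = "routing_table.indices." + index + ".shards.0.node";
        @SuppressWarnings("unchecked")
        List<String> assignedNodes = (List<String>) XContentMapValues.extractValue(xpath, state);
        assertNotNull(state.toString(), assignedNodes);             // the shard entry must exist in the routing table
        assertThat(state.toString(), newNode, isIn(assignedNodes)); // ...and list the target node
    }, 60, TimeUnit.SECONDS);

Only after this check passes does the test call ensureGreen(index) and run the _only_nodes search against the new node.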
Closes #34950 --- .../java/org/elasticsearch/upgrades/RecoveryIT.java | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index a2ad99482b5..40ecb789352 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -42,6 +42,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.function.Predicate; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; @@ -51,6 +52,7 @@ import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAlloc import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -206,7 +208,6 @@ public class RecoveryIT extends AbstractRollingTestCase { return null; } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34950") public void testRelocationWithConcurrentIndexing() throws Exception { final String index = "relocation_with_concurrent_indexing"; switch (CLUSTER_TYPE) { @@ -240,6 +241,15 @@ public class RecoveryIT extends AbstractRollingTestCase { ensureNoInitializingShards(); // wait for all other shard activity to finish updateIndexSettings(index, Settings.builder().put("index.routing.allocation.include._id", newNode)); asyncIndexDocs(index, 10, 50).get(); + // ensure the relocation from old node to new node has occurred; otherwise ensureGreen can + // return true even though shards haven't moved to the new node yet (allocation was throttled). + assertBusy(() -> { + Map state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); + String xpath = "routing_table.indices." + index + ".shards.0.node"; + @SuppressWarnings("unchecked") List assignedNodes = (List) XContentMapValues.extractValue(xpath, state); + assertNotNull(state.toString(), assignedNodes); + assertThat(state.toString(), newNode, isIn(assignedNodes)); + }, 60, TimeUnit.SECONDS); ensureGreen(index); client().performRequest(new Request("POST", index + "/_refresh")); assertCount(index, "_only_nodes:" + newNode, 60); From 713e5c987b72f0b94944caa9000e6879bc82374f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 8 Apr 2019 15:42:48 -0400 Subject: [PATCH 43/45] Adjust init map size of user data of index commit (#40965) The number of user data attributes of an index commit has increased from 6 to 8, but we forgot to adjust the initial size of the map that holds them. This change increases the initial size of that map to avoid resizing. 
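For background, java.util.HashMap rehashes once its entry count exceeds capacity times load factor (0.75 by default), so the constructor argument is a bucket count rather than the number of entries the map holds before growing. A small, self-contained illustration of that arithmetic; plain Java, not code from the patch:

    import java.util.HashMap;
    import java.util.Map;

    public class CommitDataSizing {
        public static void main(String[] args) {
            int initialCapacity = 8;          // the value the patch now passes to HashMap
            double defaultLoadFactor = 0.75;  // HashMap's default
            // The table is rehashed once more entries than this threshold are inserted.
            System.out.println("rehash threshold = " + (int) (initialCapacity * defaultLoadFactor));

            // Illustrative keys only; the real map carries the engine's commit user data
            // (translog generation, translog UUID, checkpoints, and so on).
            Map<String, String> commitData = new HashMap<>(initialCapacity);
            commitData.put("translog_generation", "42");
            commitData.put("translog_uuid", "example-uuid");
        }
    }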
--- .../java/org/elasticsearch/index/engine/InternalEngine.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 47bc6e307e3..86dea798dc2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -2360,7 +2360,7 @@ public class InternalEngine extends Engine { * {@link IndexWriter#commit()} call flushes all documents, we defer computation of the maximum sequence number to the time * of invocation of the commit data iterator (which occurs after all documents have been flushed to Lucene). */ - final Map commitData = new HashMap<>(6); + final Map commitData = new HashMap<>(8); commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogFileGeneration); commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID); commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpointValue); @@ -2612,7 +2612,7 @@ public class InternalEngine extends Engine { * Gets the commit data from {@link IndexWriter} as a map. */ private static Map commitDataAsMap(final IndexWriter indexWriter) { - Map commitData = new HashMap<>(6); + final Map commitData = new HashMap<>(8); for (Map.Entry entry : indexWriter.getLiveCommitData()) { commitData.put(entry.getKey(), entry.getValue()); } From 69421612e53295603c9ecd1589e134e4b6e18b8f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 8 Apr 2019 22:00:48 -0400 Subject: [PATCH 44/45] Mute testRecoverMissingAnalyzer Tracked at #40867 --- .../test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index fa6469037b0..3402622c78a 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -407,6 +407,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase { * if it is sane and if we can successfully create an IndexService. * This also includes plugins etc. 
*/ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40867") public void testRecoverMissingAnalyzer() throws Exception { logger.info("--> starting one node"); internalCluster().startNode(); From 64c98c632a20902eeeeb4e36c762954b66b81cfd Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 8 Apr 2019 21:17:49 -0700 Subject: [PATCH 45/45] Ignore failing tests (#40994) --- x-pack/plugin/sql/qa/src/main/resources/date.csv-spec | 2 +- x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec index 5b9e9a3fcfb..f417c15d8f0 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec @@ -35,7 +35,7 @@ SELECT TRUNCATE(YEAR(TODAY() - INTERVAL 50 YEARS) / 1000) AS result; ; -currentDateFilter +currentDateFilter-Ignore SELECT first_name FROM test_emp WHERE hire_date > CURRENT_DATE() - INTERVAL 25 YEARS ORDER BY first_name ASC LIMIT 10; first_name diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index 91bb06f0903..0389fdc43f3 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -2400,7 +2400,7 @@ SELECT TODAY() AS result; // end::todayFunction ; -filterToday +filterToday-Ignore // tag::filterToday SELECT first_name FROM emp WHERE hire_date > TODAY() - INTERVAL 25 YEARS ORDER BY first_name ASC LIMIT 5;
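Both of these hunks disable a test case by appending -Ignore to its name in the spec file instead of deleting the query. A minimal sketch of how a spec runner can honor that naming convention; the class and method names here are illustrative assumptions, not the actual SQL QA framework code:

    import org.junit.Assume;

    // Hypothetical helper for a spec-driven runner: test cases whose name in the
    // .csv-spec file ends with "-Ignore" are skipped rather than executed.
    public final class SpecIgnoreConvention {

        private SpecIgnoreConvention() {}

        public static void skipIfIgnored(String specTestName) {
            Assume.assumeFalse("test case is marked -Ignore in the spec file",
                specTestName.endsWith("-Ignore"));
        }
    }

A runner would call SpecIgnoreConvention.skipIfIgnored("filterToday-Ignore") before executing the query, so the entry stays in the spec file and can be re-enabled later by removing the suffix.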