diff --git a/TESTING.asciidoc b/TESTING.asciidoc index dcd6c9981be..aa5431ed69b 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -430,15 +430,33 @@ cd $BATS_ARCHIVES sudo -E bats $BATS_TESTS/*.bats ------------------------------------------------- -Note: Starting vagrant VM outside of the elasticsearch folder requires to -indicates the folder that contains the Vagrantfile using the VAGRANT_CWD -environment variable: +You can also use Gradle to prepare the test environment and then start a single VM: ------------------------------------------------- -gradle vagrantSetUp -VAGRANT_CWD=/path/to/elasticsearch vagrant up centos-7 --provider virtualbox +gradle vagrantFedora24#up ------------------------------------------------- +Or any of vagrantCentos6#up, vagrantDebian8#up, vagrantFedora24#up, vagrantOel6#up, +vagrantOel7#up, vagrantOpensuse13#up, vagrantSles12#up, vagrantUbuntu1204#up, +vagrantUbuntu1604#up. + +Once up, you can then connect to the VM using SSH from the elasticsearch directory: + +------------------------------------------------- +vagrant ssh fedora-24 +------------------------------------------------- + +Or from another directory: + +------------------------------------------------- +VAGRANT_CWD=/path/to/elasticsearch vagrant ssh fedora-24 +------------------------------------------------- + +Note: Starting a vagrant VM outside of the elasticsearch folder requires +indicating the folder that contains the Vagrantfile using the VAGRANT_CWD +environment variable. + + == Coverage analysis Tests can be run instrumented with jacoco to produce a coverage report in diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy index 176b02cf9b0..47a559efccb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy @@ -39,7 +39,7 @@ public class RestTestPlugin implements Plugin { if (false == REQUIRED_PLUGINS.any {project.pluginManager.hasPlugin(it)}) { throw new InvalidUserDataException('elasticsearch.rest-test ' + 'requires either elasticsearch.build or ' - + 'elasticsearch.standalone-test') + + 'elasticsearch.standalone-rest-test') } RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index 6e017671017..c48dc890ab0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -40,9 +40,9 @@ public class StandaloneRestTestPlugin implements Plugin { @Override public void apply(Project project) { if (project.pluginManager.hasPlugin('elasticsearch.build')) { - throw new InvalidUserDataException('elasticsearch.standalone-test, ' - + 'elasticsearch.standalone-test, and elasticsearch.build are ' - + 'mutually exclusive') + throw new InvalidUserDataException('elasticsearch.standalone-test, ' + + 'elasticsearch.standalone-rest-test, and elasticsearch.build ' + + 'are mutually exclusive') } project.pluginManager.apply(JavaBasePlugin) project.pluginManager.apply(RandomizedTestingPlugin) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index a5bb054a8b6..0b7a105e8ab 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -404,10 +404,6 @@ class VagrantTestPlugin implements Plugin { args 'halt', box } stop.dependsOn(halt) - if (project.extensions.esvagrant.boxes.contains(box) == false) { - // we only need a halt task if this box was not specified - continue; - } Task update = project.tasks.create("vagrant${boxTask}#update", VagrantCommandTask) { boxName box @@ -435,6 +431,11 @@ class VagrantTestPlugin implements Plugin { dependsOn update } + if (project.extensions.esvagrant.boxes.contains(box) == false) { + // we don't need test tasks if this box was not specified + continue; + } + Task smoke = project.tasks.create("vagrant${boxTask}#smoketest", Exec) { environment vagrantEnvVars dependsOn up diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index b540be68565..51b1be9bceb 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -443,7 +443,6 @@ - @@ -468,7 +467,6 @@ - diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index ee55fd9c2ad..2ea22803356 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -24,9 +24,12 @@ import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.IndexSettings; @@ -39,11 +42,23 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.Locale; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + /** * A base class for the response of a write operation that involves a single doc */ public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContentObject { + private static final String _SHARDS = "_shards"; + private static final String _INDEX = "_index"; + private static final String _TYPE = "_type"; + private static final String _ID = "_id"; + private static final String _VERSION = "_version"; + private static final String _SEQ_NO = "_seq_no"; + private static final String RESULT = "result"; + private static final String FORCED_REFRESH = "forced_refresh"; + /** * An enum that represents the results of CRUD operations, primarily used to communicate the type of * operation that occurred.
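A minimal sketch of the ConstructingObjectParser pattern these constants feed into (illustrative only, not part of the patch: SimpleDocResponse and its fields are invented names). Each field is declared once against a ParseField, and the parser hands the collected values to the factory lambda in declaration order:

-------------------------------------------------
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

// Hypothetical response type, for illustration only.
public class SimpleDocResponse {
    private final String index;
    private final String id;

    SimpleDocResponse(String index, String id) {
        this.index = index;
        this.id = id;
    }

    // args[] holds the constructor arguments in the order the fields were declared.
    private static final ConstructingObjectParser<SimpleDocResponse, Void> PARSER =
            new ConstructingObjectParser<>("simple_doc_response",
                    args -> new SimpleDocResponse((String) args[0], (String) args[1]));
    static {
        PARSER.declareString(constructorArg(), new ParseField("_index"));
        PARSER.declareString(constructorArg(), new ParseField("_id"));
    }

    public static SimpleDocResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }
}
-------------------------------------------------

Subclasses share one parser the same way: DocWriteResponse.declareParserFields(PARSER) below registers the common fields (with optionalConstructorArg() for fields that may be absent, such as _seq_no), and IndexResponse then declares only its own created flag.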
@@ -253,18 +268,32 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { ReplicationResponse.ShardInfo shardInfo = getShardInfo(); - builder.field("_index", shardId.getIndexName()) - .field("_type", type) - .field("_id", id) - .field("_version", version) - .field("result", getResult().getLowercase()); + builder.field(_INDEX, shardId.getIndexName()) + .field(_TYPE, type) + .field(_ID, id) + .field(_VERSION, version) + .field(RESULT, getResult().getLowercase()); if (forcedRefresh) { - builder.field("forced_refresh", true); + builder.field(FORCED_REFRESH, true); } - shardInfo.toXContent(builder, params); + builder.field(_SHARDS, shardInfo); if (getSeqNo() >= 0) { - builder.field("_seq_no", getSeqNo()); + builder.field(_SEQ_NO, getSeqNo()); } return builder; } + + /** + * Declare the {@link ObjectParser} fields to use when parsing a {@link DocWriteResponse} + */ + protected static void declareParserFields(ConstructingObjectParser objParser) { + objParser.declareString(constructorArg(), new ParseField(_INDEX)); + objParser.declareString(constructorArg(), new ParseField(_TYPE)); + objParser.declareString(constructorArg(), new ParseField(_ID)); + objParser.declareLong(constructorArg(), new ParseField(_VERSION)); + objParser.declareString(constructorArg(), new ParseField(RESULT)); + objParser.declareLong(optionalConstructorArg(), new ParseField(_SEQ_NO)); + objParser.declareBoolean(DocWriteResponse::setForcedRefresh, new ParseField(FORCED_REFRESH)); + objParser.declareObject(DocWriteResponse::setShardInfo, (p, c) -> ShardInfo.fromXContent(p), new ParseField(_SHARDS)); + } } diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java index c508546b433..219e81b9622 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -20,13 +20,21 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + /** * A response of an index operation, * @@ -35,6 +43,8 @@ import java.io.IOException; */ public class IndexResponse extends DocWriteResponse { + private static final String CREATED = "created"; + public IndexResponse() { } @@ -64,7 +74,34 @@ public class IndexResponse extends DocWriteResponse { @Override public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { super.innerToXContent(builder, params); - builder.field("created", result == Result.CREATED); + builder.field(CREATED, result == Result.CREATED); return builder; } + + /** + * ConstructingObjectParser used to parse the {@link IndexResponse}. 
We use an ObjectParser here + * because most fields are parsed by the parent abstract class {@link DocWriteResponse} and it's + * not easy to parse part of the fields in the parent class and other fields in the child class + * using the usual streamed parsing method. + */ + private static final ConstructingObjectParser PARSER; + static { + PARSER = new ConstructingObjectParser<>(IndexResponse.class.getName(), + args -> { + // index uuid and shard id are unknown and can't be parsed back for now. + ShardId shardId = new ShardId(new Index((String) args[0], IndexMetaData.INDEX_UUID_NA_VALUE), -1); + String type = (String) args[1]; + String id = (String) args[2]; + long version = (long) args[3]; + long seqNo = (args[5] != null) ? (long) args[5] : SequenceNumbersService.UNASSIGNED_SEQ_NO; + boolean created = (boolean) args[6]; + return new IndexResponse(shardId, type, id, seqNo, version, created); + }); + DocWriteResponse.declareParserFields(PARSER); + PARSER.declareBoolean(constructorArg(), new ParseField(CREATED)); + } + + public static IndexResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 2479ff86750..6e9d762da8f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -292,7 +292,7 @@ abstract class AbstractSearchAsyncAction private void raiseEarlyFailure(Exception e) { for (AtomicArray.Entry entry : firstResults.asList()) { try { - DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.shardTarget().getNodeId()); sendReleaseSearchContext(entry.value.id(), node); } catch (Exception inner) { inner.addSuppressed(e); @@ -317,7 +317,7 @@ abstract class AbstractSearchAsyncAction if (queryResult.hasHits() && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs try { - DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.queryResult().shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.queryResult().shardTarget().getNodeId()); sendReleaseSearchContext(entry.value.queryResult().id(), node); } catch (Exception e) { logger.trace("failed to release context", e); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index 9db3a21c485..5d73120efab 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -75,7 +75,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction entry : firstResults.asList()) { DfsSearchResult dfsResult = entry.value; - DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().getNodeId()); QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest); } diff --git
a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 3fe24cc9911..c25f65dfa84 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -84,7 +84,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : firstResults.asList()) { DfsSearchResult dfsResult = entry.value; - DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().getNodeId()); QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); executeQuery(entry.index, dfsResult, counter, querySearchRequest, node); } @@ -155,7 +155,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : docIdsToLoad.asList()) { QuerySearchResult queryResult = queryResults.get(entry.index); - DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().getNodeId()); ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard); executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 92270c6fe36..e9c9968ecb8 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -71,9 +71,9 @@ import java.util.stream.StreamSupport; public class SearchPhaseController extends AbstractComponent { private static final Comparator> QUERY_RESULT_ORDERING = (o1, o2) -> { - int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index()); + int i = o1.value.shardTarget().getIndex().compareTo(o2.value.shardTarget().getIndex()); if (i == 0) { - i = o1.value.shardTarget().shardId().id() - o2.value.shardTarget().shardId().id(); + i = o1.value.shardTarget().getShardId().id() - o2.value.shardTarget().getShardId().id(); } return i; }; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 7b300063291..b36728af7b0 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -90,7 +90,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : docIdsToLoad.asList()) { QuerySearchResultProvider queryResult = firstResults.get(entry.index); - DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().getNodeId()); ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard); executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } diff --git 
a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index 851e3343bc2..f4cb0c40d4c 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -185,7 +185,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { final QuerySearchResult querySearchResult = queryResults.get(entry.index); ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index]; ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc); - DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId()); + DiscoveryNode node = nodes.get(querySearchResult.shardTarget().getNodeId()); searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, task, new ActionListener() { @Override public void onResponse(FetchSearchResult result) { diff --git a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index 8070081dcd8..14301463958 100644 --- a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -93,7 +93,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { @Override public String index() { if (shardTarget != null) { - return shardTarget.index(); + return shardTarget.getIndex(); } return null; } @@ -104,7 +104,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { @Override public int shardId() { if (shardTarget != null) { - return shardTarget.shardId().id(); + return shardTarget.getShardId().id(); } return -1; } @@ -156,7 +156,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { builder.field("shard", shardId()); builder.field("index", index()); if (shardTarget != null) { - builder.field("node", shardTarget.nodeId()); + builder.field("node", shardTarget.getNodeId()); } if (cause != null) { builder.field("reason"); diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index a09a651086b..389ee140af2 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -21,11 +21,9 @@ package org.elasticsearch.action.search; import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.RAMOutputStream; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.internal.InternalScrollSearchRequest; -import org.elasticsearch.search.internal.ShardSearchTransportRequest; import java.io.IOException; import java.util.Base64; @@ -53,7 +51,7 @@ final class TransportSearchHelper { for (AtomicArray.Entry entry : searchPhaseResults.asList()) { SearchPhaseResult searchPhaseResult = entry.value; out.writeLong(searchPhaseResult.id()); - out.writeString(searchPhaseResult.shardTarget().nodeId()); + out.writeString(searchPhaseResult.shardTarget().getNodeId()); } byte[] bytes = new byte[(int) out.getFilePointer()]; 
out.writeTo(bytes, 0); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index 2f701c87286..91c3089de20 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -73,7 +73,7 @@ public class ReplicationResponse extends ActionResponse { this.shardInfo = shardInfo; } - public static class ShardInfo implements Streamable, ToXContent { + public static class ShardInfo implements Streamable, ToXContentObject { private static final String _SHARDS = "_shards"; private static final String TOTAL = "total"; @@ -179,7 +179,7 @@ public class ReplicationResponse extends ActionResponse { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(_SHARDS); + builder.startObject(); builder.field(TOTAL, total); builder.field(SUCCESSFUL, successful); builder.field(FAILED, getFailed()); @@ -195,18 +195,12 @@ public class ReplicationResponse extends ActionResponse { } public static ShardInfo fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token = parser.nextToken(); - ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); - - String currentFieldName = parser.currentName(); - if (_SHARDS.equals(currentFieldName) == false) { - throwUnknownField(currentFieldName, parser.getTokenLocation()); - } - token = parser.nextToken(); + XContentParser.Token token = parser.currentToken(); ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation); int total = 0, successful = 0; List failuresList = null; + String currentFieldName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); diff --git a/core/src/main/java/org/elasticsearch/client/node/NodeClient.java b/core/src/main/java/org/elasticsearch/client/node/NodeClient.java index 6c3aa071ba3..e4f26b15702 100644 --- a/core/src/main/java/org/elasticsearch/client/node/NodeClient.java +++ b/core/src/main/java/org/elasticsearch/client/node/NodeClient.java @@ -28,12 +28,14 @@ import org.elasticsearch.action.GenericAction; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.Client; import org.elasticsearch.client.support.AbstractClient; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskListener; import org.elasticsearch.threadpool.ThreadPool; import java.util.Map; +import java.util.function.Supplier; /** * Client that executes actions on the local node. @@ -41,13 +43,19 @@ import java.util.Map; public class NodeClient extends AbstractClient { private Map actions; + /** + * The id of the local {@link DiscoveryNode}. Useful for generating task ids from tasks returned by + * {@link #executeLocally(GenericAction, ActionRequest, TaskListener)}. 
+ */ + private Supplier localNodeId; public NodeClient(Settings settings, ThreadPool threadPool) { super(settings, threadPool); } - public void initialize(Map actions) { + public void initialize(Map actions, Supplier localNodeId) { this.actions = actions; + this.localNodeId = localNodeId; } @Override @@ -85,6 +93,14 @@ public class NodeClient extends AbstractClient { return transportAction(action).execute(request, listener); } + /** + * The id of the local {@link DiscoveryNode}. Useful for generating task ids from tasks returned by + * {@link #executeLocally(GenericAction, ActionRequest, TaskListener)}. + */ + public String getLocalNodeId() { + return localNodeId.get(); + } + /** * Get the {@link TransportAction} for an {@link Action}, throwing exceptions if the action isn't available. */ diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index 94333c10dde..bda1481130c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -36,7 +36,9 @@ import org.elasticsearch.discovery.zen.NodesFaultDetection; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ScheduledFuture; @@ -76,20 +78,26 @@ public class NodeConnectionsService extends AbstractLifecycleComponent { this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings); } - public void connectToNodes(List addedNodes) { + public void connectToNodes(Iterable discoveryNodes) { // TODO: do this in parallel (and wait) - for (final DiscoveryNode node : addedNodes) { + for (final DiscoveryNode node : discoveryNodes) { try (Releasable ignored = nodeLocks.acquire(node)) { - Integer current = nodes.put(node, 0); - assert current == null : "node " + node + " was added in event but already in internal nodes"; + nodes.putIfAbsent(node, 0); validateNodeConnected(node); } } } - public void disconnectFromNodes(List removedNodes) { - for (final DiscoveryNode node : removedNodes) { + /** + * Disconnects from all nodes except the ones provided as a parameter + */ + public void disconnectFromNodesExcept(Iterable nodesToKeep) { + Set currentNodes = new HashSet<>(nodes.keySet()); + for (DiscoveryNode node : nodesToKeep) { + currentNodes.remove(node); + } + for (final DiscoveryNode node : currentNodes) { try (Releasable ignored = nodeLocks.acquire(node)) { Integer current = nodes.remove(node); assert current != null : "node " + node + " was removed in event but not in internal nodes"; diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index d4fff64530e..378fa924627 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -772,7 +772,7 @@ public class ClusterService extends AbstractLifecycleComponent { taskOutputs.createAckListener(threadPool, newClusterState) : null; - nodeConnectionsService.connectToNodes(clusterChangedEvent.nodesDelta().addedNodes()); + nodeConnectionsService.connectToNodes(newClusterState.nodes()); // if we are the master, publish the new state to all nodes // we publish here before
we send a notification to all the listeners, since if it fails @@ -788,7 +788,8 @@ public class ClusterService extends AbstractLifecycleComponent { "failing [{}]: failed to commit cluster state version [{}]", taskInputs.summary, version), t); // ensure that list of connected nodes in NodeConnectionsService is in-sync with the nodes of the current cluster state - nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().addedNodes()); + nodeConnectionsService.connectToNodes(previousClusterState.nodes()); + nodeConnectionsService.disconnectFromNodesExcept(previousClusterState.nodes()); taskOutputs.publishingFailed(t); return; } @@ -808,7 +809,7 @@ public class ClusterService extends AbstractLifecycleComponent { logger.debug("set local cluster state to version {}", newClusterState.version()); callClusterStateAppliers(newClusterState, clusterChangedEvent); - nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes()); + nodeConnectionsService.disconnectFromNodesExcept(newClusterState.nodes()); updateState(css -> newClusterState); diff --git a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java index 548c1da5a8c..16f47f78ddb 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java +++ b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java @@ -38,8 +38,8 @@ public final class ESLoggerFactory { public static final Setting LOG_DEFAULT_LEVEL_SETTING = new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope); public static final Setting LOG_LEVEL_SETTING = - Setting.prefixKeySetting("logger.", Level.INFO.name(), Level::valueOf, - Property.Dynamic, Property.NodeScope); + Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Property.Dynamic, + Property.NodeScope)); public static Logger getLogger(String prefix, String name) { return getLogger(prefix, LogManager.getLogger(name)); diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 89a56f03ecc..95333a988cd 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -195,6 +195,19 @@ public abstract class AbstractScopedSettings extends AbstractComponent { addSettingsUpdater(setting.newUpdater(consumer, logger, validator)); } + /** + * Adds a settings consumer for affix settings. Affix settings have a namespace associated with them that needs to be available to the + * consumer in order to be processed correctly.
+ */ + public synchronized void addAffixUpdateConsumer(Setting.AffixSetting setting, BiConsumer consumer, + BiConsumer validator) { + final Setting registeredSetting = this.complexMatchers.get(setting.getKey()); + if (setting != registeredSetting) { + throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); + } + addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator)); + } + synchronized void addSettingsUpdater(SettingUpdater updater) { this.settingUpdaters.add(updater); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 4ed64b77c4a..d3bc4ebaf0b 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -42,14 +42,17 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; +import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; +import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; +import java.util.stream.Stream; /** * A setting. Encapsulates typical stuff like default value, parsing, and scope. @@ -410,8 +413,8 @@ public class Setting extends ToXContentToBytes { } /** - * Updates settings that depend on eachother. See {@link AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} - * and its usage for details. + * Updates settings that depend on each other. + * See {@link AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} and its usage for details. 
*/ static AbstractScopedSettings.SettingUpdater> compoundUpdater(final BiConsumer consumer, final Setting aSetting, final Setting bSetting, Logger logger) { @@ -446,6 +449,76 @@ public class Setting extends ToXContentToBytes { }; } + public static class AffixSetting extends Setting { + private final AffixKey key; + private final Function> delegateFactory; + + public AffixSetting(AffixKey key, Setting delegate, Function> delegateFactory) { + super(key, delegate.defaultValue, delegate.parser, delegate.properties.toArray(new Property[0])); + this.key = key; + this.delegateFactory = delegateFactory; + } + + boolean isGroupSetting() { + return true; + } + + private Stream matchStream(Settings settings) { + return settings.getAsMap().keySet().stream().filter((key) -> match(key)).map(settingKey -> key.getConcreteString(settingKey)); + } + + AbstractScopedSettings.SettingUpdater, T>> newAffixUpdater( + BiConsumer consumer, Logger logger, BiConsumer validator) { + return new AbstractScopedSettings.SettingUpdater, T>>() { + + @Override + public boolean hasChanged(Settings current, Settings previous) { + return Stream.concat(matchStream(current), matchStream(previous)).findAny().isPresent(); + } + + @Override + public Map, T> getValue(Settings current, Settings previous) { + // we collect all concrete keys and then delegate to the actual setting for validation and settings extraction + final Map, T> result = new IdentityHashMap<>(); + Stream.concat(matchStream(current), matchStream(previous)).forEach(aKey -> { + String namespace = key.getNamespace(aKey); + AbstractScopedSettings.SettingUpdater updater = + getConcreteSetting(aKey).newUpdater((v) -> consumer.accept(namespace, v), logger, + (v) -> validator.accept(namespace, v)); + if (updater.hasChanged(current, previous)) { + // only the ones that have changed otherwise we might get too many updates + // the hasChanged above checks only if there are any changes + T value = updater.getValue(current, previous); + result.put(updater, value); + } + }); + return result; + } + + @Override + public void apply(Map, T> value, Settings current, Settings previous) { + for (Map.Entry, T> entry : value.entrySet()) { + entry.getKey().apply(entry.getValue(), current, previous); + } + } + }; + } + + @Override + public Setting getConcreteSetting(String key) { + if (match(key)) { + return delegateFactory.apply(key); + } else { + throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't."); + } + } + + @Override + public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) { + matchStream(defaultSettings).forEach((key) -> getConcreteSetting(key).diff(builder, source, defaultSettings)); + } + } + private final class Updater implements AbstractScopedSettings.SettingUpdater { private final Consumer consumer; @@ -727,7 +800,6 @@ public class Setting extends ToXContentToBytes { } } - private static String arrayToParsableString(String[] array) { try { XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); @@ -741,9 +813,11 @@ public class Setting extends ToXContentToBytes { throw new ElasticsearchException(ex); } } + public static Setting groupSetting(String key, Property... properties) { return groupSetting(key, (s) -> {}, properties); } + public static Setting groupSetting(String key, Consumer validator, Property... 
properties) { return new Setting(new GroupKey(key), (s) -> "", (s) -> null, properties) { @Override @@ -894,59 +968,24 @@ public class Setting extends ToXContentToBytes { * can easily be added with this setting. Yet, prefix key settings don't support updaters out of the box unless * {@link #getConcreteSetting(String)} is used to pull the updater. */ - public static Setting prefixKeySetting(String prefix, String defaultValue, Function parser, - Property... properties) { - return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, properties); + public static AffixSetting prefixKeySetting(String prefix, Function> delegateFactory) { + return affixKeySetting(new AffixKey(prefix), delegateFactory); } /** * This setting type allows to validate settings that have the same type and a common prefix and suffix. For instance - * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, adfix key settings don't support updaters + * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, affix key settings don't support updaters * out of the box unless {@link #getConcreteSetting(String)} is used to pull the updater. */ - public static Setting affixKeySetting(String prefix, String suffix, Function defaultValue, - Function parser, Property... properties) { - return affixKeySetting(AffixKey.withAffix(prefix, suffix), defaultValue, parser, properties); + public static AffixSetting affixKeySetting(String prefix, String suffix, Function> delegateFactory) { + return affixKeySetting(new AffixKey(prefix, suffix), delegateFactory); } - public static Setting affixKeySetting(String prefix, String suffix, String defaultValue, Function parser, - Property... properties) { - return affixKeySetting(prefix, suffix, (s) -> defaultValue, parser, properties); - } + private static AffixSetting affixKeySetting(AffixKey key, Function> delegateFactory) { + Setting delegate = delegateFactory.apply("_na_"); + return new AffixSetting<>(key, delegate, delegateFactory); }; - public static Setting affixKeySetting(AffixKey key, Function defaultValue, Function parser, - Property... properties) { - return new Setting(key, defaultValue, parser, properties) { - - @Override - boolean isGroupSetting() { - return true; - } - - @Override - AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, Logger logger, Consumer validator) { - throw new UnsupportedOperationException("Affix settings can't be updated. Use #getConcreteSetting for updating."); - } - - @Override - public Setting getConcreteSetting(String key) { - if (match(key)) { - return new Setting<>(key, defaultValue, parser, properties); - } else { - throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't."); - } - } - - @Override - public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) { - for (Map.Entry entry : defaultSettings.getAsMap().entrySet()) { - if (match(entry.getKey())) { - getConcreteSetting(entry.getKey()).diff(builder, source, defaultSettings); - } - } - } - }; - } public interface Key { @@ -1012,37 +1051,60 @@ } } + /** + * A key that allows for a static prefix and suffix. This is used for settings + * that have dynamic namespaces, e.g. for different accounts.
+ */ public static final class AffixKey implements Key { - public static AffixKey withPrefix(String prefix) { - return new AffixKey(prefix, null); - } - - public static AffixKey withAffix(String prefix, String suffix) { - return new AffixKey(prefix, suffix); - } - + private final Pattern pattern; private final String prefix; private final String suffix; - public AffixKey(String prefix, String suffix) { + AffixKey(String prefix) { + this(prefix, null); + } + + AffixKey(String prefix, String suffix) { assert prefix != null || suffix != null: "Either prefix or suffix must be non-null"; + this.prefix = prefix; if (prefix.endsWith(".") == false) { throw new IllegalArgumentException("prefix must end with a '.'"); } this.suffix = suffix; + if (suffix == null) { + pattern = Pattern.compile("(" + Pattern.quote(prefix) + "((?:[-\\w]+[.])*[-\\w]+$))"); + } else { + // the last part of this regexp is for lists since they are represented as x.${namespace}.y.1, x.${namespace}.y.2 + pattern = Pattern.compile("(" + Pattern.quote(prefix) + "([-\\w]+)\\." + Pattern.quote(suffix) + ")(?:\\.\\d+)?"); + } } @Override public boolean match(String key) { - boolean match = true; - if (prefix != null) { - match = key.startsWith(prefix); + return pattern.matcher(key).matches(); + } + + /** + * Returns a string representation of the concrete setting key + */ + String getConcreteString(String key) { + Matcher matcher = pattern.matcher(key); + if (matcher.matches() == false) { + throw new IllegalStateException("can't get concrete string for key " + key + " key doesn't match"); } - if (suffix != null) { - match = match && key.endsWith(suffix); + return matcher.group(1); + } + + /** + * Returns the namespace of a concrete setting key + */ + String getNamespace(String key) { + Matcher matcher = pattern.matcher(key); + if (matcher.matches() == false) { + throw new IllegalStateException("can't get namespace for key " + key + " key doesn't match"); } - return match; + return matcher.group(2); } public SimpleKey toConcreteKey(String missingPart) { diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java index 956a0d85de8..fec83eefbdf 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentParser.Token; import java.io.IOException; @@ -56,6 +57,14 @@ public final class XContentParserUtils { throw new ParsingException(location, String.format(Locale.ROOT, message, field)); } + /** + * @throws ParsingException with an "unknown token found" reason + */ + public static void throwUnknownToken(XContentParser.Token token, XContentLocation location) { + String message = "Failed to parse object: unexpected token [%s] found"; + throw new ParsingException(location, String.format(Locale.ROOT, message, token)); + } + /** * Makes sure that provided token is of the expected type * @@ -67,4 +76,35 @@ throw new ParsingException(location.get(), String.format(Locale.ROOT, message, expected, actual)); } } + + /** + * Parse the current token depending on its token type. The following token types will be + * parsed by the corresponding parser methods:
+ * <ul>
+ *     <li>XContentParser.Token.VALUE_STRING: parser.text()</li>
+ *     <li>XContentParser.Token.VALUE_NUMBER: parser.numberValue()</li>
+ *     <li>XContentParser.Token.VALUE_BOOLEAN: parser.booleanValue()</li>
+ *     <li>XContentParser.Token.VALUE_EMBEDDED_OBJECT: parser.binaryValue()</li>
+ * </ul>
+ * + * @throws ParsingException if the token is none of the allowed values + */ + public static Object parseStoredFieldsValue(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + Object value = null; + if (token == XContentParser.Token.VALUE_STRING) { + //binary values will be parsed back and returned as base64 strings when reading from json and yaml + value = parser.text(); + } else if (token == XContentParser.Token.VALUE_NUMBER) { + value = parser.numberValue(); + } else if (token == XContentParser.Token.VALUE_BOOLEAN) { + value = parser.booleanValue(); + } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { + //binary values will be parsed back and returned as BytesArray when reading from cbor and smile + value = new BytesArray(parser.binaryValue()); + } else { + throwUnknownToken(token, parser.getTokenLocation()); + } + return value; + } } diff --git a/core/src/main/java/org/elasticsearch/index/get/GetField.java b/core/src/main/java/org/elasticsearch/index/get/GetField.java index 3a0fa14acee..928988e3d3d 100644 --- a/core/src/main/java/org/elasticsearch/index/get/GetField.java +++ b/core/src/main/java/org/elasticsearch/index/get/GetField.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.get; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -36,6 +34,7 @@ import java.util.List; import java.util.Objects; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseStoredFieldsValue; public class GetField implements Streamable, ToXContent, Iterable { @@ -119,21 +118,7 @@ public class GetField implements Streamable, ToXContent, Iterable { ensureExpectedToken(XContentParser.Token.START_ARRAY, token, parser::getTokenLocation); List values = new ArrayList<>(); while((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - Object value; - if (token == XContentParser.Token.VALUE_STRING) { - //binary values will be parsed back and returned as base64 strings when reading from json and yaml - value = parser.text(); - } else if (token == XContentParser.Token.VALUE_NUMBER) { - value = parser.numberValue(); - } else if (token == XContentParser.Token.VALUE_BOOLEAN) { - value = parser.booleanValue(); - } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - //binary values will be parsed back and returned as BytesArray when reading from cbor and smile - value = new BytesArray(parser.binaryValue()); - } else { - throw new ParsingException(parser.getTokenLocation(), "Failed to parse object: unsupported token found [" + token + "]"); - } - values.add(value); + values.add(parseStoredFieldsValue(parser)); } return new GetField(fieldName, values); } diff --git a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java index 1fbeb81febc..738bfee061f 100644 --- a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FuzzyQuery; +import
org.apache.lucene.search.GraphQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PhraseQuery; @@ -48,6 +49,7 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; import java.io.IOException; +import java.util.List; public class MatchQuery { @@ -316,6 +318,21 @@ public class MatchQuery { public Query createPhrasePrefixQuery(String field, String queryText, int phraseSlop, int maxExpansions) { final Query query = createFieldQuery(getAnalyzer(), Occur.MUST, field, queryText, true, phraseSlop); + if (query instanceof GraphQuery) { + // we have a graph query, convert inner queries to multi phrase prefix queries + List oldQueries = ((GraphQuery) query).getQueries(); + Query[] queries = new Query[oldQueries.size()]; + for (int i = 0; i < queries.length; i++) { + queries[i] = toMultiPhrasePrefix(oldQueries.get(i), phraseSlop, maxExpansions); + } + + return new GraphQuery(queries); + } + + return toMultiPhrasePrefix(query, phraseSlop, maxExpansions); + } + + private Query toMultiPhrasePrefix(final Query query, int phraseSlop, int maxExpansions) { float boost = 1; Query innerQuery = query; while (innerQuery instanceof BoostQuery) { @@ -357,18 +374,38 @@ public class MatchQuery { Query booleanQuery = createBooleanQuery(field, queryText, lowFreqOccur); if (booleanQuery != null && booleanQuery instanceof BooleanQuery) { BooleanQuery bq = (BooleanQuery) booleanQuery; - ExtendedCommonTermsQuery query = new ExtendedCommonTermsQuery(highFreqOccur, lowFreqOccur, maxTermFrequency, ( - (BooleanQuery) booleanQuery).isCoordDisabled(), fieldType); - for (BooleanClause clause : bq.clauses()) { - if (!(clause.getQuery() instanceof TermQuery)) { - return booleanQuery; + return boolToExtendedCommonTermsQuery(bq, highFreqOccur, lowFreqOccur, maxTermFrequency, fieldType); + } else if (booleanQuery != null && booleanQuery instanceof GraphQuery && ((GraphQuery) booleanQuery).hasBoolean()) { + // we have a graph query that has at least one boolean sub-query + // re-build and use extended common terms + List oldQueries = ((GraphQuery) booleanQuery).getQueries(); + Query[] queries = new Query[oldQueries.size()]; + for (int i = 0; i < queries.length; i++) { + Query oldQuery = oldQueries.get(i); + if (oldQuery instanceof BooleanQuery) { + queries[i] = boolToExtendedCommonTermsQuery((BooleanQuery) oldQuery, highFreqOccur, lowFreqOccur, maxTermFrequency, fieldType); + } else { + queries[i] = oldQuery; } - query.add(((TermQuery) clause.getQuery()).getTerm()); } - return query; - } - return booleanQuery; + return new GraphQuery(queries); + } + + return booleanQuery; + } + + private Query boolToExtendedCommonTermsQuery(BooleanQuery bq, Occur highFreqOccur, Occur lowFreqOccur, float + maxTermFrequency, MappedFieldType fieldType) { + ExtendedCommonTermsQuery query = new ExtendedCommonTermsQuery(highFreqOccur, lowFreqOccur, maxTermFrequency, + bq.isCoordDisabled(), fieldType); + for (BooleanClause clause : bq.clauses()) { + if (!(clause.getQuery() instanceof TermQuery)) { + return bq; + } + query.add(((TermQuery) clause.getQuery()).getTerm()); + } + return query; } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardId.java b/core/src/main/java/org/elasticsearch/index/shard/ShardId.java index a9bc63ae44f..a806c414e9a 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardId.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardId.java @@ -19,6 
+19,7 @@ package org.elasticsearch.index.shard; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -71,6 +72,22 @@ public class ShardId implements Streamable, Comparable { return "[" + index.getName() + "][" + shardId + "]"; } + /** + * Parse the string representation of this shardId back to an object. + * We lose index uuid information here, but since we use toString in + * rest responses, this is the best we can do to reconstruct the object + * on the client side. + */ + public static ShardId fromString(String shardIdString) { + int splitPosition = shardIdString.indexOf("]["); + if (splitPosition <= 0 || shardIdString.charAt(0) != '[' || shardIdString.charAt(shardIdString.length() - 1) != ']') { + throw new IllegalArgumentException("Unexpected shardId string format, expected [indexName][shardId] but got " + shardIdString); + } + String indexName = shardIdString.substring(1, splitPosition); + int shardId = Integer.parseInt(shardIdString.substring(splitPosition + 2, shardIdString.length() - 1)); + return new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index bdbce03bda1..fa41824f4de 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -855,7 +855,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC in.readLong(); // ttl } this.versionType = VersionType.fromValue(in.readByte()); - assert versionType.validateVersionForWrites(this.version); + assert versionType.validateVersionForWrites(this.version) : "invalid version for writes: " + this.version; if (format >= FORMAT_AUTO_GENERATED_IDS) { this.autoGeneratedIdTimestamp = in.readLong(); } else { @@ -1036,8 +1036,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC this.versionType = VersionType.fromValue(in.readByte()); assert versionType.validateVersionForWrites(this.version); if (format >= FORMAT_SEQ_NO) { - seqNo = in.readVLong(); - primaryTerm = in.readVLong(); + seqNo = in.readLong(); + primaryTerm = in.readLong(); } } @@ -1100,8 +1100,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC out.writeString(uid.text()); out.writeLong(version); out.writeByte(versionType.getValue()); - out.writeVLong(seqNo); - out.writeVLong(primaryTerm); + out.writeLong(seqNo); + out.writeLong(primaryTerm); } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 3ed9282be59..2f74bd0fbd4 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -222,7 +222,7 @@ public class RecoverySourceHandler { final long numDocsSource = recoverySourceMetadata.getNumDocs(); if (numDocsTarget != numDocsSource) { throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " + - "of docs differ: " + numDocsTarget + " (" + 
request.sourceNode().getName() + ", primary) vs " + numDocsSource + "of docs differ: " + numDocsSource + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsTarget + "(" + request.targetNode().getName() + ")"); } // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target. diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index f77e56705df..ebd12142917 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -119,7 +119,6 @@ import org.elasticsearch.repositories.RepositoriesModule; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.snapshots.SnapshotShardsService; @@ -379,8 +378,7 @@ public class Node implements Closeable { Collection pluginComponents = pluginsService.filterPlugins(Plugin.class).stream() .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService, - scriptModule.getScriptService(), searchModule.getSearchRequestParsers(), - xContentRegistry).stream()) + scriptModule.getScriptService(), xContentRegistry).stream()) .collect(Collectors.toList()); Collection>> customMetaDataUpgraders = pluginsService.filterPlugins(Plugin.class).stream() @@ -410,7 +408,6 @@ public class Node implements Closeable { final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry, networkService, clusterService, pluginsService.filterPlugins(DiscoveryPlugin.class)); modules.add(b -> { - b.bind(SearchRequestParsers.class).toInstance(searchModule.getSearchRequestParsers()); b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry); b.bind(PluginsService.class).toInstance(pluginsService); b.bind(Client.class).toInstance(client); @@ -463,7 +460,8 @@ public class Node implements Closeable { .map(injector::getInstance).collect(Collectors.toList())); resourcesToClose.addAll(pluginLifecycleComponents); this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents); - client.initialize(injector.getInstance(new Key>() {})); + client.initialize(injector.getInstance(new Key>() {}), + () -> clusterService.localNode().getId()); logger.info("initialized"); diff --git a/core/src/main/java/org/elasticsearch/plugins/Plugin.java b/core/src/main/java/org/elasticsearch/plugins/Plugin.java index e7d97b0724e..87c5ef9a8c6 100644 --- a/core/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -42,7 +42,6 @@ import org.elasticsearch.repositories.RepositoriesModule; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; @@ -103,11 +102,10 @@ public abstract class Plugin implements Closeable { * @param threadPool A service to allow retrieving an executor to run an async action * @param resourceWatcherService A service to watch for changes to node local files * @param scriptService A 
service to allow running scripts on the local node - * @param searchRequestParsers Parsers for search requests which may be used to templatize search requests */ public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, ResourceWatcherService resourceWatcherService, ScriptService scriptService, - SearchRequestParsers searchRequestParsers, NamedXContentRegistry xContentRegistry) { + NamedXContentRegistry xContentRegistry) { return Collections.emptyList(); } diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 0ac2d7bade1..65153a9f586 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -176,8 +176,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private static final String SNAPSHOT_CODEC = "snapshot"; - static final String SNAPSHOTS_FILE = "index"; // package private for unit testing - private static final String INDEX_FILE_PREFIX = "index-"; private static final String INDEX_LATEST_BLOB = "index.latest"; @@ -373,7 +371,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp BlobPath indexPath = basePath().add("indices").add(indexId.getId()); BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath); try { - indexMetaDataFormat(snapshot.version()).delete(indexMetaDataBlobContainer, snapshotId.getUUID()); + indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID()); } catch (IOException ex) { logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex); } @@ -421,7 +419,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp if (snapshotInfo != null) { // we know the version the snapshot was created with try { - snapshotFormat(snapshotInfo.version()).delete(snapshotsBlobContainer, blobId); + snapshotFormat.delete(snapshotsBlobContainer, blobId); } catch (IOException e) { logger.warn((Supplier) () -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", snapshotInfo.snapshotId(), blobId), e); } @@ -439,7 +437,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp if (snapshotInfo != null) { // we know the version the snapshot was created with try { - globalMetaDataFormat(snapshotInfo.version()).delete(snapshotsBlobContainer, blobId); + globalMetaDataFormat.delete(snapshotsBlobContainer, blobId); } catch (IOException e) { logger.warn((Supplier) () -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", snapshotInfo.snapshotId(), blobId), e); } @@ -522,7 +520,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } } try { - metaData = globalMetaDataFormat(snapshotVersion).read(snapshotsBlobContainer, snapshotId.getUUID()); + metaData = globalMetaDataFormat.read(snapshotsBlobContainer, snapshotId.getUUID()); } catch (NoSuchFileException ex) { throw new SnapshotMissingException(metadata.name(), snapshotId, ex); } catch (IOException ex) { @@ -533,7 +531,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp BlobPath indexPath = basePath().add("indices").add(index.getId()); BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath); 
try { - metaDataBuilder.put(indexMetaDataFormat(snapshotVersion).read(indexMetaDataBlobContainer, snapshotId.getUUID()), false); + metaDataBuilder.put(indexMetaDataFormat.read(indexMetaDataBlobContainer, snapshotId.getUUID()), false); } catch (ElasticsearchParseException | IOException ex) { if (ignoreIndexErrors) { logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex); @@ -563,27 +561,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } } - /** - * Returns appropriate global metadata format based on the provided version of the snapshot - */ - private BlobStoreFormat globalMetaDataFormat(Version version) { - return globalMetaDataFormat; - } - - /** - * Returns appropriate snapshot format based on the provided version of the snapshot - */ - private BlobStoreFormat snapshotFormat(Version version) { - return snapshotFormat; - } - - /** - * Returns appropriate index metadata format based on the provided version of the snapshot - */ - private BlobStoreFormat indexMetaDataFormat(Version version) { - return indexMetaDataFormat; - } - @Override public long getSnapshotThrottleTimeInNanos() { return snapshotRateLimitingTimeInNanos.count(); @@ -643,6 +620,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp // EMPTY is safe here because RepositoryData#fromXContent calls namedObject try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) { repositoryData = RepositoryData.snapshotsFromXContent(parser, indexGen); + } catch (NotXContentException e) { + logger.warn("[{}] index blob is not valid x-content [{} bytes]", snapshotsIndexBlobName, out.bytes().length()); + throw e; } } diff --git a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java index ba952e23c23..33b73a6ff83 100644 --- a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java +++ b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java @@ -123,29 +123,21 @@ public class BytesRestResponse extends RestResponse { } else if (channel.detailedErrorsEnabled()) { final ToXContent.Params params; if (channel.request().paramAsBoolean("error_trace", !ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) { - params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request()); + params = new ToXContent.DelegatingMapParams( + Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request()); } else { if (status.getStatus() < 500) { - SUPPRESSED_ERROR_LOGGER.debug((Supplier) () -> new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params()), e); + SUPPRESSED_ERROR_LOGGER.debug( + (Supplier) () -> new ParameterizedMessage("path: {}, params: {}", + channel.request().rawPath(), channel.request().params()), e); } else { - SUPPRESSED_ERROR_LOGGER.warn((Supplier) () -> new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params()), e); + SUPPRESSED_ERROR_LOGGER.warn( + (Supplier) () -> new ParameterizedMessage("path: {}, params: {}", + channel.request().rawPath(), channel.request().params()), e); } params = channel.request(); } - builder.field("error"); - builder.startObject(); - final ElasticsearchException[] rootCauses = 
ElasticsearchException.guessRootCauses(e); - builder.field("root_cause"); - builder.startArray(); - for (ElasticsearchException rootCause : rootCauses){ - builder.startObject(); - rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params)); - builder.endObject(); - } - builder.endArray(); - - ElasticsearchException.toXContent(builder, params, e); - builder.endObject(); + ElasticsearchException.renderException(builder, params, e); } else { builder.field("error", simpleMessage(e)); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 073cc6bee8d..d265db94d9e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -37,7 +37,6 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; @@ -53,12 +52,10 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestMultiSearchAction extends BaseRestHandler { private final boolean allowExplicitIndex; - private final SearchRequestParsers searchRequestParsers; @Inject - public RestMultiSearchAction(Settings settings, RestController controller, SearchRequestParsers searchRequestParsers) { + public RestMultiSearchAction(Settings settings, RestController controller) { super(settings); - this.searchRequestParsers = searchRequestParsers; controller.registerHandler(GET, "/_msearch", this); controller.registerHandler(POST, "/_msearch", this); @@ -72,7 +69,7 @@ public class RestMultiSearchAction extends BaseRestHandler { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - MultiSearchRequest multiSearchRequest = parseRequest(request, allowExplicitIndex, searchRequestParsers, parseFieldMatcher); + MultiSearchRequest multiSearchRequest = parseRequest(request, allowExplicitIndex, parseFieldMatcher); return channel -> client.multiSearch(multiSearchRequest, new RestToXContentListener<>(channel)); } @@ -80,7 +77,6 @@ public class RestMultiSearchAction extends BaseRestHandler { * Parses a {@link RestRequest} body and returns a {@link MultiSearchRequest} */ public static MultiSearchRequest parseRequest(RestRequest restRequest, boolean allowExplicitIndex, - SearchRequestParsers searchRequestParsers, ParseFieldMatcher parseFieldMatcher) throws IOException { MultiSearchRequest multiRequest = new MultiSearchRequest(); if (restRequest.hasParam("max_concurrent_searches")) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index fe4fe4a4f19..68669fe07eb 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -36,7 +36,6 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.rest.action.RestStatusToXContentListener; import 
org.elasticsearch.search.Scroll; -import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -54,13 +53,9 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.search.suggest.SuggestBuilders.termSuggestion; public class RestSearchAction extends BaseRestHandler { - - private final SearchRequestParsers searchRequestParsers; - @Inject - public RestSearchAction(Settings settings, RestController controller, SearchRequestParsers searchRequestParsers) { + public RestSearchAction(Settings settings, RestController controller) { super(settings); - this.searchRequestParsers = searchRequestParsers; controller.registerHandler(GET, "/_search", this); controller.registerHandler(POST, "/_search", this); controller.registerHandler(GET, "/{index}/_search", this); @@ -73,7 +68,7 @@ public class RestSearchAction extends BaseRestHandler { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { SearchRequest searchRequest = new SearchRequest(); request.withContentOrSourceParamParserOrNull(parser -> - parseSearchRequest(searchRequest, request, searchRequestParsers, parseFieldMatcher, parser)); + parseSearchRequest(searchRequest, request, parseFieldMatcher, parser)); return channel -> client.search(searchRequest, new RestStatusToXContentListener<>(channel)); } @@ -84,8 +79,8 @@ public class RestSearchAction extends BaseRestHandler { * @param requestContentParser body of the request to read. This method does not attempt to read the body from the {@code request} * parameter */ - public static void parseSearchRequest(SearchRequest searchRequest, RestRequest request, SearchRequestParsers searchRequestParsers, - ParseFieldMatcher parseFieldMatcher, XContentParser requestContentParser) throws IOException { + public static void parseSearchRequest(SearchRequest searchRequest, RestRequest request, ParseFieldMatcher parseFieldMatcher, + XContentParser requestContentParser) throws IOException { if (searchRequest.source() == null) { searchRequest.source(new SearchSourceBuilder()); diff --git a/core/src/main/java/org/elasticsearch/search/SearchHit.java b/core/src/main/java/org/elasticsearch/search/SearchHit.java index c9ccddd05e6..ef9aef8fbb7 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/SearchHit.java @@ -24,7 +24,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import java.util.Map; @@ -34,7 +34,7 @@ import java.util.Map; * * @see SearchHits */ -public interface SearchHit extends Streamable, ToXContent, Iterable { +public interface SearchHit extends Streamable, ToXContentObject, Iterable { /** * The score. 
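For illustration, a hedged sketch (not part of this patch) of what a caller of the two static REST parse helpers looks like once SearchRequestParsers is gone; the restRequest and parseFieldMatcher variables are assumed to already be in scope:

-------------------------------------------------
// Multi-search: parseRequest drops its SearchRequestParsers parameter.
MultiSearchRequest msearch =
    RestMultiSearchAction.parseRequest(restRequest, true /* allowExplicitIndex */, parseFieldMatcher);

// Single search: the body parser is handed in directly, no parsers container needed.
SearchRequest search = new SearchRequest();
restRequest.withContentOrSourceParamParserOrNull(parser ->
    RestSearchAction.parseSearchRequest(search, restRequest, parseFieldMatcher, parser));
-------------------------------------------------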
diff --git a/core/src/main/java/org/elasticsearch/search/SearchModule.java b/core/src/main/java/org/elasticsearch/search/SearchModule.java index 951855820ad..048630dad20 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/core/src/main/java/org/elasticsearch/search/SearchModule.java @@ -276,7 +276,6 @@ public class SearchModule { private final Settings settings; private final List namedWriteables = new ArrayList<>(); private final List namedXContents = new ArrayList<>(); - private final SearchRequestParsers searchRequestParsers; public SearchModule(Settings settings, boolean transportClient, List plugins) { this.settings = settings; @@ -295,7 +294,6 @@ public class SearchModule { registerFetchSubPhases(plugins); registerSearchExts(plugins); registerShapes(); - searchRequestParsers = new SearchRequestParsers(); } public List getNamedWriteables() { @@ -306,10 +304,6 @@ public class SearchModule { return namedXContents; } - public SearchRequestParsers getSearchRequestParsers() { - return searchRequestParsers; - } - /** * Returns the {@link Highlighter} registry */ diff --git a/core/src/main/java/org/elasticsearch/search/SearchRequestParsers.java b/core/src/main/java/org/elasticsearch/search/SearchRequestParsers.java deleted file mode 100644 index 8a3186ad1da..00000000000 --- a/core/src/main/java/org/elasticsearch/search/SearchRequestParsers.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search; - -/** - * A container for all parsers used to parse - * {@link org.elasticsearch.action.search.SearchRequest} objects from a rest request. 
- */ -public class SearchRequestParsers { - public SearchRequestParsers() { - } -} diff --git a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java index 9fb83227188..5fd20555f81 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -22,7 +22,6 @@ package org.elasticsearch.search; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.Index; @@ -35,21 +34,20 @@ import java.io.IOException; */ public class SearchShardTarget implements Writeable, Comparable { - private Text nodeId; - private Text index; - private ShardId shardId; + private final Text nodeId; + private final ShardId shardId; public SearchShardTarget(StreamInput in) throws IOException { if (in.readBoolean()) { nodeId = in.readText(); + } else { + nodeId = null; } shardId = ShardId.readShardId(in); - index = new Text(shardId.getIndexName()); } public SearchShardTarget(String nodeId, ShardId shardId) { this.nodeId = nodeId == null ? null : new Text(nodeId); - this.index = new Text(shardId.getIndexName()); this.shardId = shardId; } @@ -58,33 +56,16 @@ public class SearchShardTarget implements Writeable, Comparable 0) { @@ -154,7 +153,7 @@ public abstract class AbstractAggregationBuilder other = (AbstractAggregationBuilder) obj; if (!Objects.equals(name, other.name)) return false; - if (!Objects.equals(type, other.type)) - return false; if (!Objects.equals(metaData, other.metaData)) return false; if (!Objects.equals(factoriesBuilder, other.factoriesBuilder)) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java index a05d5091d03..14875895b77 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -38,24 +37,18 @@ public abstract class AggregationBuilder implements NamedWriteable, ToXContent, BaseAggregationBuilder { protected final String name; - protected final Type type; protected AggregatorFactories.Builder factoriesBuilder = AggregatorFactories.builder(); /** * Constructs a new aggregation builder. * * @param name The aggregation name - * @param type The aggregation type */ - protected AggregationBuilder(String name, Type type) { + protected AggregationBuilder(String name) { if (name == null) { throw new IllegalArgumentException("[name] must not be null: [" + name + "]"); } - if (type == null) { - throw new IllegalArgumentException("[type] must not be null: [" + name + "]"); - } this.name = name; - this.type = type; } /** Return this aggregation's name. 
*/ diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index 151fc57dded..14b66c475ec 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.Scorer; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; @@ -163,7 +162,6 @@ public abstract class AggregatorFactory> { } protected final String name; - protected final Type type; protected final AggregatorFactory parent; protected final AggregatorFactories factories; protected final Map metaData; @@ -174,15 +172,12 @@ public abstract class AggregatorFactory> { * * @param name * The aggregation name - * @param type - * The aggregation type * @throws IOException * if an error occurs creating the factory */ - public AggregatorFactory(String name, Type type, SearchContext context, AggregatorFactory parent, + public AggregatorFactory(String name, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { this.name = name; - this.type = type; this.context = context; this.parent = parent; this.factories = subFactoriesBuilder.build(context, this); @@ -226,10 +221,6 @@ public abstract class AggregatorFactory> { return createInternal(parent, collectsFromSingleBucket, this.factories.createPipelineAggregators(), this.metaData); } - public String getType() { - return type.name(); - } - public AggregatorFactory getParent() { return parent; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 9928d7eb127..752b3497a92 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -38,33 +38,6 @@ import java.util.Objects; * An internal implementation of {@link Aggregation}. Serves as a base class for all aggregation implementations. */ public abstract class InternalAggregation implements Aggregation, ToXContent, NamedWriteable { - /** - * The aggregation type that holds all the string types that are associated with an aggregation: - *
<ul>
- *     <li>name - used as the parser type</li>
- * </ul>
- */ - public static class Type { - private final String name; - - public Type(String name) { - this.name = name; - } - - /** - * @return The name of the type of aggregation. This is the key for parsing the aggregation from XContent and is the name of the - * aggregation's builder when serialized. - */ - public String name() { - return name; - } - - @Override - public String toString() { - return name; - } - } - public static class ReduceContext { private final BigArrays bigArrays; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java index 48025e13aa1..ddd252a6f53 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.FieldContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.ParentChild; @@ -46,7 +45,6 @@ import java.util.Objects; public class ChildrenAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "children"; - private static final Type TYPE = new Type(NAME); private String parentType; private final String childType; @@ -60,7 +58,7 @@ public class ChildrenAggregationBuilder extends ValuesSourceAggregationBuilder
<ParentChild, ChildrenAggregationBuilder> { @Override protected ValuesSourceAggregatorFactory<ParentChild, ?>
innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new ChildrenAggregatorFactory(name, type, config, parentType, childFilter, parentFilter, context, parent, + return new ChildrenAggregatorFactory(name, config, parentType, childFilter, parentFilter, context, parent, subFactoriesBuilder, metaData); } @@ -163,7 +161,7 @@ public class ChildrenAggregationBuilder extends ValuesSourceAggregationBuilder

config, String parentType, Query childFilter, + public ChildrenAggregatorFactory(String name, ValuesSourceConfig config, String parentType, Query childFilter, Query parentFilter, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.parentType = parentType; this.childFilter = childFilter; this.parentFilter = parentFilter; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java index ccf1883ecf4..9fb8b368e9d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java @@ -27,7 +27,6 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -35,7 +34,6 @@ import java.util.Objects; public class FilterAggregationBuilder extends AbstractAggregationBuilder { public static final String NAME = "filter"; - private static final Type TYPE = new Type(NAME); private final QueryBuilder filter; @@ -48,7 +46,7 @@ public class FilterAggregationBuilder extends AbstractAggregationBuilder parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, context, parent, subFactoriesBuilder, metaData); + super(name, context, parent, subFactoriesBuilder, metaData); IndexSearcher contextSearcher = context.searcher(); Query filter = filterBuilder.toQuery(context.getQueryShardContext()); weight = contextSearcher.createNormalizedWeight(filter, false); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregationBuilder.java index b970d809020..f34df7368ac 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregationBuilder.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregator.KeyedFilter; import org.elasticsearch.search.internal.SearchContext; @@ -43,7 +42,6 @@ import java.util.Objects; public class FiltersAggregationBuilder extends AbstractAggregationBuilder { public static final String NAME = "filters"; - private static final Type TYPE = new Type(NAME); private static final ParseField FILTERS_FIELD = new ParseField("filters"); private static final ParseField OTHER_BUCKET_FIELD = new 
ParseField("other_bucket"); @@ -65,7 +63,7 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder filters) { - super(name, TYPE); + super(name); // internally we want to have a fixed order of filters, regardless of the order of the filters in the request this.filters = new ArrayList<>(filters); Collections.sort(this.filters, (KeyedFilter kf1, KeyedFilter kf2) -> kf1.key().compareTo(kf2.key())); @@ -79,7 +77,7 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder keyedFilters = new ArrayList<>(filters.length); for (int i = 0; i < filters.length; i++) { keyedFilters.add(new KeyedFilter(String.valueOf(i), filters[i])); @@ -92,7 +90,7 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder(filtersSize); @@ -176,7 +174,7 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder filters, boolean keyed, boolean otherBucket, + public FiltersAggregatorFactory(String name, List filters, boolean keyed, boolean otherBucket, String otherBucketKey, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { - super(name, type, context, parent, subFactories, metaData); + super(name, context, parent, subFactories, metaData); this.keyed = keyed; this.otherBucket = otherBucket; this.otherBucketKey = otherBucketKey; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 26e58f51320..728ed2c8823 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.fielddata.SortingNumericDocValues; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.bucket.BucketUtils; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -51,7 +50,6 @@ import java.util.Objects; public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "geohash_grid"; - private static final Type TYPE = new Type(NAME); public static final int DEFAULT_PRECISION = 5; public static final int DEFAULT_MAX_NUM_CELLS = 10000; @@ -73,14 +71,14 @@ public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder config, int precision, int requiredSize, + public GeoHashGridAggregatorFactory(String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.precision = precision; this.requiredSize = requiredSize; this.shardSize = shardSize; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java 
b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java index c9941984d6e..2363ed498a9 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java @@ -26,24 +26,22 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; public class GlobalAggregationBuilder extends AbstractAggregationBuilder { public static final String NAME = "global"; - private static final Type TYPE = new Type(NAME); public GlobalAggregationBuilder(String name) { - super(name, TYPE); + super(name); } /** * Read from a stream. */ public GlobalAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE); + super(in); } @Override @@ -54,7 +52,7 @@ public class GlobalAggregationBuilder extends AbstractAggregationBuilder doBuild(SearchContext context, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new GlobalAggregatorFactory(name, type, context, parent, subFactoriesBuilder, metaData); + return new GlobalAggregatorFactory(name, context, parent, subFactoriesBuilder, metaData); } @Override @@ -80,7 +78,7 @@ public class GlobalAggregationBuilder extends AbstractAggregationBuilder { - public GlobalAggregatorFactory(String name, Type type, SearchContext context, AggregatorFactory parent, + public GlobalAggregatorFactory(String name, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { - super(name, type, context, parent, subFactories, metaData); + super(name, context, parent, subFactories, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java index f278e5e72b1..6ba3b79e968 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java @@ -32,7 +32,8 @@ import java.util.Map; * regardless the query. 
*/ public class InternalGlobal extends InternalSingleBucketAggregation implements Global { - InternalGlobal(String name, long docCount, InternalAggregations aggregations, List pipelineAggregators, Map metaData) { + InternalGlobal(String name, long docCount, InternalAggregations aggregations, List pipelineAggregators, + Map metaData) { super(name, docCount, aggregations, pipelineAggregators, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 67f662bc334..bc6bf845c85 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -54,7 +54,7 @@ import static java.util.Collections.unmodifiableMap; */ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuilder { - public static final String NAME = InternalDateHistogram.TYPE.name(); + public static final String NAME = "date_histogram"; public static final Map DATE_FIELD_UNITS; @@ -131,12 +131,12 @@ public class DateHistogramAggregationBuilder /** Create a new builder with the given name. */ public DateHistogramAggregationBuilder(String name) { - super(name, InternalDateHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DATE); + super(name, ValuesSourceType.NUMERIC, ValueType.DATE); } /** Read from a stream, for internal use only. */ public DateHistogramAggregationBuilder(StreamInput in) throws IOException { - super(in, InternalDateHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DATE); + super(in, ValuesSourceType.NUMERIC, ValueType.DATE); if (in.readBoolean()) { order = InternalOrder.Streams.readOrder(in); } @@ -315,7 +315,7 @@ public class DateHistogramAggregationBuilder } @Override - public String getWriteableName() { + public String getType() { return NAME; } @@ -328,7 +328,7 @@ public class DateHistogramAggregationBuilder // parse any string bounds to longs and round roundedBounds = this.extendedBounds.parseAndValidate(name, context, config.format()).round(rounding); } - return new DateHistogramAggregatorFactory(name, type, config, interval, dateHistogramInterval, offset, order, keyed, minDocCount, + return new DateHistogramAggregatorFactory(name, config, interval, dateHistogramInterval, offset, order, keyed, minDocCount, rounding, roundedBounds, context, parent, subFactoriesBuilder, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 4cde6f2530d..44bb3e02afe 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import 
org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -47,11 +46,11 @@ public final class DateHistogramAggregatorFactory private final ExtendedBounds extendedBounds; private Rounding rounding; - public DateHistogramAggregatorFactory(String name, Type type, ValuesSourceConfig config, long interval, + public DateHistogramAggregatorFactory(String name, ValuesSourceConfig config, long interval, DateHistogramInterval dateHistogramInterval, long offset, InternalOrder order, boolean keyed, long minDocCount, Rounding rounding, ExtendedBounds extendedBounds, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.interval = interval; this.dateHistogramInterval = dateHistogramInterval; this.offset = offset; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index 873f32c1fde..3cf143110fb 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -49,7 +49,7 @@ import java.util.Objects; */ public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder { - public static final String NAME = InternalHistogram.TYPE.name(); + public static final String NAME = "histogram"; private static final ObjectParser EXTENDED_BOUNDS_PARSER = new ObjectParser<>( Histogram.EXTENDED_BOUNDS_FIELD.getPreferredName(), @@ -94,12 +94,12 @@ public class HistogramAggregationBuilder /** Create a new builder with the given name. */ public HistogramAggregationBuilder(String name) { - super(name, InternalHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DOUBLE); + super(name, ValuesSourceType.NUMERIC, ValueType.DOUBLE); } /** Read from a stream, for internal use only. 
*/ public HistogramAggregationBuilder(StreamInput in) throws IOException { - super(in, InternalHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DOUBLE); + super(in, ValuesSourceType.NUMERIC, ValueType.DOUBLE); if (in.readBoolean()) { order = InternalOrder.Streams.readOrder(in); } @@ -260,14 +260,14 @@ public class HistogramAggregationBuilder } @Override - public String getWriteableName() { - return InternalHistogram.TYPE.name(); + public String getType() { + return NAME; } @Override protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new HistogramAggregatorFactory(name, type, config, interval, offset, order, keyed, minDocCount, minBound, maxBound, + return new HistogramAggregatorFactory(name, config, interval, offset, order, keyed, minDocCount, minBound, maxBound, context, parent, subFactoriesBuilder, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index af27b32b206..939210b63a6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -42,11 +41,11 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact private final long minDocCount; private final double minBound, maxBound; - HistogramAggregatorFactory(String name, Type type, ValuesSourceConfig config, double interval, double offset, + HistogramAggregatorFactory(String name, ValuesSourceConfig config, double interval, double offset, InternalOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.interval = interval; this.offset = offset; this.order = order; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 56d3792e0c6..f24fc5c127e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -43,13 +43,11 @@ import java.util.ListIterator; import java.util.Map; /** - * Imelementation of {@link Histogram}. + * Implementation of {@link Histogram}. 
*/ public final class InternalDateHistogram extends InternalMultiBucketAggregation implements Histogram, HistogramFactory { - static final Type TYPE = new Type("date_histogram"); - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket { final long key; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 4dae51533db..e7f5b739216 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -40,13 +40,10 @@ import java.util.ListIterator; import java.util.Map; /** - * Imelementation of {@link Histogram}. + * Implementation of {@link Histogram}. */ public final class InternalHistogram extends InternalMultiBucketAggregation implements Histogram, HistogramFactory { - - static final Type TYPE = new Type("histogram"); - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket { final double key; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java index 986416922b6..9361acc8fcb 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -40,7 +39,6 @@ import java.io.IOException; public class MissingAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "missing"; - public static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -53,14 +51,14 @@ public class MissingAggregationBuilder extends ValuesSourceAggregationBuilder innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new MissingAggregatorFactory(name, type, config, context, parent, subFactoriesBuilder, metaData); + return new MissingAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); } @Override @@ -95,7 +93,7 @@ public class MissingAggregationBuilder extends ValuesSourceAggregationBuilder { - public MissingAggregatorFactory(String name, Type type, ValuesSourceConfig config, SearchContext context, + public MissingAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); } 
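As a hedged usage sketch (not part of this patch): with InternalAggregation.Type removed, builders are constructed from a name alone, and getType() returns the registered NAME string that previously came from TYPE.name(). The aggregation names and fluent setters below are assumptions for illustration:

-------------------------------------------------
// Builders now take only a name; the Type argument is gone.
HistogramAggregationBuilder histo = new HistogramAggregationBuilder("load_times");
histo.field("load_time").interval(50);                // assumed fluent setters
assert "histogram".equals(histo.getType());           // NAME, formerly InternalHistogram.TYPE.name()

DateHistogramAggregationBuilder byDay = new DateHistogramAggregationBuilder("by_day");
byDay.field("timestamp").dateHistogramInterval(DateHistogramInterval.DAY);
assert "date_histogram".equals(byDay.getType());      // NAME, formerly InternalDateHistogram.TYPE.name()
-------------------------------------------------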
@Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java index 1e70da265a7..f8f2602a474 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java @@ -30,7 +30,6 @@ import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -38,7 +37,6 @@ import java.util.Objects; public class NestedAggregationBuilder extends AbstractAggregationBuilder { public static final String NAME = "nested"; - private static final Type TYPE = new Type(NAME); private final String path; @@ -50,7 +48,7 @@ public class NestedAggregationBuilder extends AbstractAggregationBuilder parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { - super(name, type, context, parent, subFactories, metaData); + super(name, context, parent, subFactories, metaData); this.parentObjectMapper = parentObjectMapper; this.childObjectMapper = childObjectMapper; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java index 6adadde12ce..30bd72c6c68 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java @@ -32,7 +32,6 @@ import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -40,19 +39,18 @@ import java.util.Objects; public class ReverseNestedAggregationBuilder extends AbstractAggregationBuilder { public static final String NAME = "reverse_nested"; - private static final Type TYPE = new Type(NAME); private String path; public ReverseNestedAggregationBuilder(String name) { - super(name, TYPE); + super(name); } /** * Read from a stream. 
*/ public ReverseNestedAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE); + super(in); path = in.readOptionalString(); } @@ -93,7 +91,7 @@ public class ReverseNestedAggregationBuilder extends AbstractAggregationBuilder< if (path != null) { parentObjectMapper = context.getObjectMapper(path); if (parentObjectMapper == null) { - return new ReverseNestedAggregatorFactory(name, type, true, null, context, parent, subFactoriesBuilder, metaData); + return new ReverseNestedAggregatorFactory(name, true, null, context, parent, subFactoriesBuilder, metaData); } if (parentObjectMapper.nested().isNested() == false) { throw new AggregationExecutionException("[reverse_nested] nested path [" + path + "] is not nested"); @@ -103,7 +101,7 @@ public class ReverseNestedAggregationBuilder extends AbstractAggregationBuilder< NestedScope nestedScope = context.getQueryShardContext().nestedScope(); try { nestedScope.nextLevel(parentObjectMapper); - return new ReverseNestedAggregatorFactory(name, type, false, parentObjectMapper, context, parent, subFactoriesBuilder, + return new ReverseNestedAggregatorFactory(name, false, parentObjectMapper, context, parent, subFactoriesBuilder, metaData); } finally { nestedScope.previousLevel(); @@ -172,7 +170,7 @@ public class ReverseNestedAggregationBuilder extends AbstractAggregationBuilder< } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java index 2a691121efa..c792ca23246 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java @@ -24,7 +24,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.internal.SearchContext; @@ -38,11 +37,11 @@ public class ReverseNestedAggregatorFactory extends AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { - super(name, type, context, parent, subFactories, metaData); + super(name, context, parent, subFactories, metaData); this.unmapped = unmapped; this.parentObjectMapper = parentObjectMapper; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java index 3de8e73ba93..156adcdc4f3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.bucket.range; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import 
org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Unmapped; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -43,10 +42,10 @@ public class AbstractRangeAggregatorFactory config, R[] ranges, boolean keyed, + public AbstractRangeAggregatorFactory(String name, ValuesSourceConfig config, R[] ranges, boolean keyed, InternalRange.Factory rangeFactory, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.ranges = ranges; this.keyed = keyed; this.rangeFactory = rangeFactory; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java index e7669df2d65..635a0a6015c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java @@ -44,7 +44,7 @@ public abstract class AbstractRangeBuilder rangeFactory) { - super(name, rangeFactory.type(), rangeFactory.getValueSourceType(), rangeFactory.getValueType()); + super(name, rangeFactory.getValueSourceType(), rangeFactory.getValueType()); this.rangeFactory = rangeFactory; } @@ -53,7 +53,7 @@ public abstract class AbstractRangeBuilder rangeFactory, Writeable.Reader rangeReader) throws IOException { - super(in, rangeFactory.type(), rangeFactory.getValueSourceType(), rangeFactory.getValueType()); + super(in, rangeFactory.getValueSourceType(), rangeFactory.getValueType()); this.rangeFactory = rangeFactory; ranges = in.readList(rangeReader); keyed = in.readBoolean(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java index 86573d175c0..a8d74a7d836 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java @@ -18,33 +18,32 @@ */ package org.elasticsearch.search.aggregations.bucket.range; -import java.io.IOException; -import java.util.List; -import java.util.Map; - import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; +import java.io.IOException; +import java.util.List; +import java.util.Map; + public class BinaryRangeAggregatorFactory extends ValuesSourceAggregatorFactory { private final List ranges; 
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java
index 86573d175c0..a8d74a7d836 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java
@@ -18,33 +18,32 @@
  */
 package org.elasticsearch.search.aggregations.bucket.range;
 
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
-import org.elasticsearch.search.aggregations.InternalAggregation.Type;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 import org.elasticsearch.search.internal.SearchContext;
 
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
 public class BinaryRangeAggregatorFactory extends ValuesSourceAggregatorFactory {
 
     private final List ranges;
     private final boolean keyed;
 
-    public BinaryRangeAggregatorFactory(String name, Type type,
+    public BinaryRangeAggregatorFactory(String name,
             ValuesSourceConfig config, List ranges, boolean keyed,
             SearchContext context, AggregatorFactory parent, Builder subFactoriesBuilder,
             Map metaData) throws IOException {
-        super(name, type, config, context, parent, subFactoriesBuilder, metaData);
+        super(name, config, context, parent, subFactoriesBuilder, metaData);
         this.ranges = ranges;
         this.keyed = keyed;
     }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
index c35956e2dcf..a2228b7a27a 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
@@ -175,10 +175,6 @@ public class InternalRange
-        public Type type() {
-            return RangeAggregationBuilder.TYPE;
-        }
-
         public ValuesSourceType getValueSourceType() {
             return ValuesSourceType.NUMERIC;
         }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java
index e7f0a8a6d80..5692b34c57f 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java
@@ -23,10 +23,9 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryParseContext;
-import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
-import org.elasticsearch.search.aggregations.InternalAggregation.Type;
 import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range;
 import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
@@ -37,7 +36,6 @@ import java.io.IOException;
 public class RangeAggregationBuilder extends AbstractRangeBuilder {
     public static final String NAME = "range";
-    static final Type TYPE = new Type(NAME);
 
     private static final ObjectParser PARSER;
     static {
@@ -142,12 +140,12 @@ public class RangeAggregationBuilder extends AbstractRangeBuilder
     protected RangeAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config,
             AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException {
         // We need to call processRanges here so they are parsed before we make the decision of whether to cache the request
         Range[] ranges = processRanges(context, config);
-        return new RangeAggregatorFactory(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder,
+        return new RangeAggregatorFactory(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder,
                 metaData);
     }
 
     @Override
-    public String getWriteableName() {
+    public String getType() {
         return NAME;
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java
index
de512fbaad4..d1dc3e71b5c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.range; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.bucket.range.InternalRange.Factory; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -33,10 +32,10 @@ import java.util.Map; public class RangeAggregatorFactory extends AbstractRangeAggregatorFactory { - public RangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, Range[] ranges, boolean keyed, + public RangeAggregatorFactory(String name, ValuesSourceConfig config, Range[] ranges, boolean keyed, Factory rangeFactory, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); + super(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java index 5c7f31abfbf..de5622299cf 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java @@ -23,10 +23,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.bucket.range.AbstractRangeBuilder; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range; @@ -40,7 +39,6 @@ import java.io.IOException; public class DateRangeAggregationBuilder extends AbstractRangeBuilder { public static final String NAME = "date_range"; - static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -75,7 +73,7 @@ public class DateRangeAggregationBuilder extends AbstractRangeBuilder { - public DateRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, Range[] ranges, boolean keyed, + public DateRangeAggregatorFactory(String name, ValuesSourceConfig config, Range[] ranges, boolean keyed, Factory rangeFactory, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, ranges, 
keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); + super(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java index f7b55ab9916..6a2bea74920 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java @@ -84,11 +84,6 @@ public class InternalDateRange extends InternalRange { - @Override - public Type type() { - return DateRangeAggregationBuilder.TYPE; - } - @Override public ValueType getValueType() { return ValueType.DATE; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java index a1e9712a65b..3b8e4e40271 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java @@ -34,7 +34,6 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.bucket.range.InternalRange; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -51,7 +50,6 @@ import java.util.Objects; public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "geo_distance"; - public static final Type TYPE = new Type(NAME); static final ParseField ORIGIN_FIELD = new ParseField("origin", "center", "point", "por"); static final ParseField UNIT_FIELD = new ParseField("unit"); static final ParseField DISTANCE_TYPE_FIELD = new ParseField("distance_type"); @@ -215,7 +213,7 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde private GeoDistanceAggregationBuilder(String name, GeoPoint origin, InternalRange.Factory rangeFactory) { - super(name, rangeFactory.type(), rangeFactory.getValueSourceType(), rangeFactory.getValueType()); + super(name, rangeFactory.getValueSourceType(), rangeFactory.getValueType()); this.origin = origin; } @@ -223,8 +221,7 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde * Read from a stream. 
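 * (Editorial note, not part of the original patch: the removed Type argument was an
 * in-memory constant and, to the best of our reading, was never serialized, so
 * dropping it from these stream constructors should leave the wire format unchanged.)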
*/ public GeoDistanceAggregationBuilder(StreamInput in) throws IOException { - super(in, InternalGeoDistance.FACTORY.type(), InternalGeoDistance.FACTORY.getValueSourceType(), - InternalGeoDistance.FACTORY.getValueType()); + super(in, InternalGeoDistance.FACTORY.getValueSourceType(), InternalGeoDistance.FACTORY.getValueType()); origin = new GeoPoint(in.readDouble(), in.readDouble()); int size = in.readVInt(); ranges = new ArrayList<>(size); @@ -345,7 +342,7 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde } @Override - public String getWriteableName() { + public String getType() { return NAME; } @@ -387,7 +384,7 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { Range[] ranges = this.ranges.toArray(new Range[this.range().size()]); - return new GeoDistanceRangeAggregatorFactory(name, type, config, origin, ranges, unit, distanceType, keyed, context, parent, + return new GeoDistanceRangeAggregatorFactory(name, config, origin, ranges, unit, distanceType, keyed, context, parent, subFactoriesBuilder, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java index 513c1fe8ee1..7ad43cce6d7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.bucket.range.InternalRange; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Unmapped; @@ -56,10 +55,10 @@ public class GeoDistanceRangeAggregatorFactory private final GeoDistance distanceType; private final boolean keyed; - public GeoDistanceRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, GeoPoint origin, + public GeoDistanceRangeAggregatorFactory(String name, ValuesSourceConfig config, GeoPoint origin, Range[] ranges, DistanceUnit unit, GeoDistance distanceType, boolean keyed, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.origin = origin; this.ranges = ranges; this.unit = unit; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java index 86fc0372982..9e6518ca097 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java +++ 
b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java @@ -59,11 +59,6 @@ public class InternalGeoDistance extends InternalRange { - @Override - public Type type() { - return GeoDistanceAggregationBuilder.TYPE; - } - @Override public ValuesSourceType getValueSourceType() { return ValuesSourceType.GEOPOINT; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java index 66b0c65ed75..cb03ef7251a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java @@ -35,7 +35,6 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.range.BinaryRangeAggregator; import org.elasticsearch.search.aggregations.bucket.range.BinaryRangeAggregatorFactory; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator; @@ -60,7 +59,6 @@ import java.util.Objects; public final class IpRangeAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "ip_range"; - private static final InternalAggregation.Type TYPE = new InternalAggregation.Type(NAME); private static final ParseField MASK_FIELD = new ParseField("mask"); private static final ObjectParser PARSER; @@ -233,11 +231,11 @@ public final class IpRangeAggregationBuilder private List ranges = new ArrayList<>(); public IpRangeAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.BYTES, ValueType.IP); + super(name, ValuesSourceType.BYTES, ValueType.IP); } @Override - public String getWriteableName() { + public String getType() { return NAME; } @@ -339,7 +337,7 @@ public final class IpRangeAggregationBuilder } public IpRangeAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.BYTES, ValueType.IP); + super(in, ValuesSourceType.BYTES, ValueType.IP); final int numRanges = in.readVInt(); for (int i = 0; i < numRanges; ++i) { addRange(new Range(in)); @@ -374,7 +372,7 @@ public final class IpRangeAggregationBuilder for (Range range : this.ranges) { ranges.add(new BinaryRangeAggregator.Range(range.key, toBytesRef(range.from), toBytesRef(range.to))); } - return new BinaryRangeAggregatorFactory(name, TYPE, config, ranges, + return new BinaryRangeAggregatorFactory(name, config, ranges, keyed, context, parent, subFactoriesBuilder, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java index 2465ef97457..78f5bd0a7af 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java @@ -24,10 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -41,7 +40,6 @@ import java.util.Objects; public class DiversifiedAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "diversified_sampler"; - public static final Type TYPE = new Type(NAME); public static final int MAX_DOCS_PER_VALUE_DEFAULT = 1; @@ -63,14 +61,14 @@ public class DiversifiedAggregationBuilder extends ValuesSourceAggregationBuilde private String executionHint = null; public DiversifiedAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.ANY, null); + super(name, ValuesSourceType.ANY, null); } /** * Read from a stream. */ public DiversifiedAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.ANY, null); + super(in, ValuesSourceType.ANY, null); shardSize = in.readVInt(); maxDocsPerValue = in.readVInt(); executionHint = in.readOptionalString(); @@ -139,7 +137,7 @@ public class DiversifiedAggregationBuilder extends ValuesSourceAggregationBuilde @Override protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new DiversifiedAggregatorFactory(name, TYPE, config, shardSize, maxDocsPerValue, executionHint, context, parent, + return new DiversifiedAggregatorFactory(name, config, shardSize, maxDocsPerValue, executionHint, context, parent, subFactoriesBuilder, metaData); } @@ -167,7 +165,7 @@ public class DiversifiedAggregationBuilder extends ValuesSourceAggregationBuilde } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java index 971c60c5eb8..97a68649ca2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java @@ -24,7 +24,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator.ExecutionMode; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -44,10 +43,10 @@ public class DiversifiedAggregatorFactory extends ValuesSourceAggregatorFactory< private final int maxDocsPerValue; private final String 
executionHint; - public DiversifiedAggregatorFactory(String name, Type type, ValuesSourceConfig config, int shardSize, int maxDocsPerValue, + public DiversifiedAggregatorFactory(String name, ValuesSourceConfig config, int shardSize, int maxDocsPerValue, String executionHint, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.shardSize = shardSize; this.maxDocsPerValue = maxDocsPerValue; this.executionHint = executionHint; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java index ffca6e5b096..f69b66ffd1e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java @@ -28,7 +28,6 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -36,21 +35,20 @@ import java.util.Objects; public class SamplerAggregationBuilder extends AbstractAggregationBuilder { public static final String NAME = "sampler"; - private static final Type TYPE = new Type(NAME); public static final int DEFAULT_SHARD_SAMPLE_SIZE = 100; private int shardSize = DEFAULT_SHARD_SAMPLE_SIZE; public SamplerAggregationBuilder(String name) { - super(name, TYPE); + super(name); } /** * Read from a stream. 
*/ public SamplerAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE); + super(in); shardSize = in.readVInt(); } @@ -77,7 +75,7 @@ public class SamplerAggregationBuilder extends AbstractAggregationBuilder parent, Builder subFactoriesBuilder) throws IOException { - return new SamplerAggregatorFactory(name, type, shardSize, context, parent, subFactoriesBuilder, metaData); + return new SamplerAggregatorFactory(name, shardSize, context, parent, subFactoriesBuilder, metaData); } @Override @@ -129,7 +127,7 @@ public class SamplerAggregationBuilder extends AbstractAggregationBuilder parent, + public SamplerAggregatorFactory(String name, int shardSize, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { - super(name, type, context, parent, subFactories, metaData); + super(name, context, parent, subFactories, metaData); this.shardSize = shardSize; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java index 3042824b4de..72825dbd989 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java @@ -26,12 +26,10 @@ import org.elasticsearch.common.xcontent.ParseFieldRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser; @@ -53,7 +51,6 @@ import java.util.Objects; public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "significant_terms"; - public static final InternalAggregation.Type TYPE = new Type(NAME); static final ParseField BACKGROUND_FILTER = new ParseField("background_filter"); static final ParseField HEURISTIC = new ParseField("significance_heuristic"); @@ -111,14 +108,14 @@ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationB private SignificanceHeuristic significanceHeuristic = DEFAULT_SIGNIFICANCE_HEURISTIC; public SignificantTermsAggregationBuilder(String name, ValueType valueType) { - super(name, TYPE, ValuesSourceType.ANY, valueType); + super(name, ValuesSourceType.ANY, valueType); } /** * Read from a Stream. 
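 * (Editorial note, not part of the original patch: with the TYPE constant gone,
 * NAME is the single source of truth for this aggregation's name; the getType()
 * override further down returns it where getWriteableName() used to.)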
*/ public SignificantTermsAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.ANY); + super(in, ValuesSourceType.ANY); bucketCountThresholds = new BucketCountThresholds(in); executionHint = in.readOptionalString(); filterBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); @@ -267,7 +264,7 @@ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationB protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { SignificanceHeuristic executionHeuristic = this.significanceHeuristic.rewrite(context); - return new SignificantTermsAggregatorFactory(name, type, config, includeExclude, executionHint, filterBuilder, + return new SignificantTermsAggregatorFactory(name, config, includeExclude, executionHint, filterBuilder, bucketCountThresholds, executionHeuristic, context, parent, subFactoriesBuilder, metaData); } @@ -303,7 +300,7 @@ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationB } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index bbfa21a7b16..71e13d49a30 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -40,7 +40,6 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.BucketUtils; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; @@ -71,11 +70,11 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac private final TermsAggregator.BucketCountThresholds bucketCountThresholds; private final SignificanceHeuristic significanceHeuristic; - public SignificantTermsAggregatorFactory(String name, Type type, ValuesSourceConfig config, IncludeExclude includeExclude, + public SignificantTermsAggregatorFactory(String name, ValuesSourceConfig config, IncludeExclude includeExclude, String executionHint, QueryBuilder filterBuilder, TermsAggregator.BucketCountThresholds bucketCountThresholds, SignificanceHeuristic significanceHeuristic, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.includeExclude = includeExclude; this.executionHint = executionHint; this.filter = filterBuilder == null diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index 7e3daf5034b..d8df9501e4e 100644 --- 
a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
@@ -22,10 +22,12 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
@@ -152,4 +154,32 @@ public class DoubleTerms extends InternalMappedTerms
     protected Bucket[] createBucketsArray(int size) {
         return new Bucket[size];
     }
+
+    @Override
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+        boolean promoteToDouble = false;
+        for (InternalAggregation agg : aggregations) {
+            if (agg instanceof LongTerms && ((LongTerms) agg).format == DocValueFormat.RAW) {
+                /*
+                 * this terms agg mixes longs and doubles, we must promote longs to doubles to make the internal aggs
+                 * compatible
+                 */
+                promoteToDouble = true;
+                break;
+            }
+        }
+        if (promoteToDouble == false) {
+            return super.doReduce(aggregations, reduceContext);
+        }
+        List<InternalAggregation> newAggs = new ArrayList<>();
+        for (InternalAggregation agg : aggregations) {
+            if (agg instanceof LongTerms) {
+                DoubleTerms dTerms = LongTerms.convertLongTermsToDouble((LongTerms) agg, format);
+                newAggs.add(dTerms);
+            } else {
+                newAggs.add(agg);
+            }
+        }
+        return newAggs.get(0).doReduce(newAggs, reduceContext);
+    }
 }
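
The new DoubleTerms#doReduce above only promotes when some shard produced raw (unformatted) long terms; LongTerms#doReduce below mirrors it by delegating to the DoubleTerms reduction whenever doubles are present. A self-contained sketch of just that decision rule, with a hypothetical Kind enum standing in for the `instanceof LongTerms && format == DocValueFormat.RAW` test in the real code:

import java.util.List;

public class PromotionRuleSketch {
    // Hypothetical stand-ins for the per-shard aggregation results.
    enum Kind { LONG_RAW, LONG_FORMATTED, DOUBLE }

    // Promote longs to doubles only when a raw-formatted LongTerms is in the mix;
    // specially formatted longs (e.g. dates) are left alone.
    static boolean mustPromoteToDouble(List<Kind> shardResults) {
        for (Kind kind : shardResults) {
            if (kind == Kind.LONG_RAW) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(mustPromoteToDouble(List.of(Kind.DOUBLE, Kind.LONG_RAW)));       // true
        System.out.println(mustPromoteToDouble(List.of(Kind.DOUBLE, Kind.LONG_FORMATTED))); // false
    }
}
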
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
index 41092dba176..b1799b52ace 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
@@ -22,10 +22,12 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
@@ -152,4 +154,32 @@ public class LongTerms extends InternalMappedTerms
     protected Bucket[] createBucketsArray(int size) {
         return new Bucket[size];
     }
+
+    @Override
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+        for (InternalAggregation agg : aggregations) {
+            if (agg instanceof DoubleTerms) {
+                return agg.doReduce(aggregations, reduceContext);
+            }
+        }
+        return super.doReduce(aggregations, reduceContext);
+    }
+
+    /**
+     * Converts a {@link LongTerms} into a {@link DoubleTerms}, returning the value of the specified long terms as doubles.
+     */
+    static DoubleTerms convertLongTermsToDouble(LongTerms longTerms, DocValueFormat decimalFormat) {
+        List<Terms.Bucket> buckets = longTerms.getBuckets();
+        List<DoubleTerms.Bucket> newBuckets = new ArrayList<>();
+        for (Terms.Bucket bucket : buckets) {
+            newBuckets.add(new DoubleTerms.Bucket(bucket.getKeyAsNumber().doubleValue(),
+                bucket.getDocCount(), (InternalAggregations) bucket.getAggregations(), longTerms.showTermDocCountError,
+                longTerms.showTermDocCountError ? bucket.getDocCountError() : 0, decimalFormat));
+        }
+        return new DoubleTerms(longTerms.getName(), longTerms.order, longTerms.requiredSize,
+            longTerms.minDocCount, longTerms.pipelineAggregators(),
+            longTerms.metaData, longTerms.format, longTerms.shardSize,
+            longTerms.showTermDocCountError, longTerms.otherDocCount,
+            newBuckets, longTerms.docCountError);
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java
index 95bac1fd890..944f9fd96a4 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java
@@ -26,12 +26,10 @@ import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
-import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
-import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.InternalAggregation.Type;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
@@ -50,7 +48,6 @@ import java.util.Objects;
 public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder {
     public static final String NAME = "terms";
-    private static final InternalAggregation.Type TYPE = new Type("terms");
 
     public static final ParseField EXECUTION_HINT_FIELD_NAME = new ParseField("execution_hint");
     public static final ParseField SHARD_SIZE_FIELD_NAME = new ParseField("shard_size");
@@ -108,14 +105,14 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException {
-        return new TermsAggregatorFactory(name, type, config, order, includeExclude, executionHint, collectMode,
+        return new TermsAggregatorFactory(name, config, order, includeExclude, executionHint, collectMode,
                 bucketCountThresholds, showTermDocCountError, context, parent, subFactoriesBuilder, metaData);
     }
@@ -326,7 +323,7 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder config, Terms.Order order,
+    public TermsAggregatorFactory(String name, ValuesSourceConfig config, Terms.Order order,
             IncludeExclude includeExclude, String executionHint, SubAggCollectionMode collectMode,
             TermsAggregator.BucketCountThresholds
bucketCountThresholds, boolean showTermDocCountError, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.order = order; this.includeExclude = includeExclude; this.executionHint = executionHint; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java index 9269c2c7a31..0d9bd6fc1a9 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java @@ -24,10 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -41,7 +40,6 @@ import java.io.IOException; public class AvgAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "avg"; - private static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -54,14 +52,14 @@ public class AvgAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } public AvgAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(name, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. 
*/ public AvgAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(in, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @Override @@ -72,7 +70,7 @@ public class AvgAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn @Override protected AvgAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new AvgAggregatorFactory(name, type, config, context, parent, subFactoriesBuilder, metaData); + return new AvgAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); } @Override @@ -91,7 +89,7 @@ public class AvgAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorFactory.java index 29b52b929fd..f1fc12ef4e5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.avg; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -36,9 +35,9 @@ import java.util.Map; public class AvgAggregatorFactory extends ValuesSourceAggregatorFactory { - public AvgAggregatorFactory(String name, Type type, ValuesSourceConfig config, SearchContext context, + public AvgAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregationBuilder.java index bf2b65436e7..1f76d8530f7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregationBuilder.java @@ -25,10 +25,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import 
org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -44,7 +43,6 @@ public final class CardinalityAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "cardinality"; - private static final Type TYPE = new Type(NAME); private static final ParseField REHASH = new ParseField("rehash").withAllDeprecated("no replacement - values will always be rehashed"); public static final ParseField PRECISION_THRESHOLD_FIELD = new ParseField("precision_threshold"); @@ -64,14 +62,14 @@ public final class CardinalityAggregationBuilder private Long precisionThreshold = null; public CardinalityAggregationBuilder(String name, ValueType targetValueType) { - super(name, TYPE, ValuesSourceType.ANY, targetValueType); + super(name, ValuesSourceType.ANY, targetValueType); } /** * Read from a stream. */ public CardinalityAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.ANY); + super(in, ValuesSourceType.ANY); if (in.readBoolean()) { precisionThreshold = in.readLong(); } @@ -124,7 +122,7 @@ public final class CardinalityAggregationBuilder @Override protected CardinalityAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new CardinalityAggregatorFactory(name, type, config, precisionThreshold, context, parent, subFactoriesBuilder, metaData); + return new CardinalityAggregatorFactory(name, config, precisionThreshold, context, parent, subFactoriesBuilder, metaData); } @Override @@ -147,7 +145,7 @@ public final class CardinalityAggregationBuilder } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java index 049f4cec9d3..0d2d32f0469 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.cardinality; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -37,10 +36,10 @@ public class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory< private final Long precisionThreshold; - public CardinalityAggregatorFactory(String name, Type type, ValuesSourceConfig config, Long precisionThreshold, + public CardinalityAggregatorFactory(String name, ValuesSourceConfig config, Long precisionThreshold, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, 
config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.precisionThreshold = precisionThreshold; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregationBuilder.java index c034c822f2a..be3ad4db802 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregationBuilder.java @@ -24,10 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -41,7 +40,6 @@ import java.util.Objects; public class GeoBoundsAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "geo_bounds"; - private static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -57,14 +55,14 @@ public class GeoBoundsAggregationBuilder extends ValuesSourceAggregationBuilder< private boolean wrapLongitude = true; public GeoBoundsAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); + super(name, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); } /** * Read from a stream. 
*/ public GeoBoundsAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); + super(in, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); wrapLongitude = in.readBoolean(); } @@ -91,7 +89,7 @@ public class GeoBoundsAggregationBuilder extends ValuesSourceAggregationBuilder< @Override protected GeoBoundsAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new GeoBoundsAggregatorFactory(name, type, config, wrapLongitude, context, parent, subFactoriesBuilder, metaData); + return new GeoBoundsAggregatorFactory(name, config, wrapLongitude, context, parent, subFactoriesBuilder, metaData); } @Override @@ -112,7 +110,7 @@ public class GeoBoundsAggregationBuilder extends ValuesSourceAggregationBuilder< } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorFactory.java index 745dfa0ebc3..e67ad49115a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.geobounds; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -37,10 +36,10 @@ public class GeoBoundsAggregatorFactory extends ValuesSourceAggregatorFactory config, boolean wrapLongitude, + public GeoBoundsAggregatorFactory(String name, ValuesSourceConfig config, boolean wrapLongitude, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.wrapLongitude = wrapLongitude; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregationBuilder.java index 34689cc696d..8e173e65923 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregationBuilder.java @@ -24,10 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import 
org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -41,7 +40,6 @@ import java.io.IOException; public class GeoCentroidAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "geo_centroid"; - public static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -54,14 +52,14 @@ public class GeoCentroidAggregationBuilder } public GeoCentroidAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); + super(name, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); } /** * Read from a stream. */ public GeoCentroidAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); + super(in, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); } @Override @@ -72,7 +70,7 @@ public class GeoCentroidAggregationBuilder @Override protected GeoCentroidAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new GeoCentroidAggregatorFactory(name, type, config, context, parent, subFactoriesBuilder, metaData); + return new GeoCentroidAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); } @Override @@ -91,7 +89,7 @@ public class GeoCentroidAggregationBuilder } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java index aa548dc0542..c21999d3fb4 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.geocentroid; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -35,10 +34,10 @@ import java.util.Map; public class GeoCentroidAggregatorFactory extends ValuesSourceAggregatorFactory { - public GeoCentroidAggregatorFactory(String name, Type type, ValuesSourceConfig config, + public GeoCentroidAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregationBuilder.java 
b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregationBuilder.java index f198a00e402..dafad9dacb5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregationBuilder.java @@ -24,10 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -41,7 +40,6 @@ import java.io.IOException; public class MaxAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "max"; - public static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -54,14 +52,14 @@ public class MaxAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } public MaxAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(name, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. */ public MaxAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(in, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @Override @@ -72,7 +70,7 @@ public class MaxAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn @Override protected MaxAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new MaxAggregatorFactory(name, type, config, context, parent, subFactoriesBuilder, metaData); + return new MaxAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); } @Override @@ -91,7 +89,7 @@ public class MaxAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregatorFactory.java index ebc5612554e..aedba76e0c7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.max; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import 
org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -36,9 +35,9 @@ import java.util.Map; public class MaxAggregatorFactory extends ValuesSourceAggregatorFactory { - public MaxAggregatorFactory(String name, Type type, ValuesSourceConfig config, SearchContext context, + public MaxAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregationBuilder.java index 4c34fe8dd7e..0f85748416e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregationBuilder.java @@ -24,10 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -42,7 +41,6 @@ import java.io.IOException; public class MinAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "min"; - private static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -55,14 +53,14 @@ public class MinAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } public MinAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(name, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. 
*/ public MinAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(in, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @Override @@ -73,7 +71,7 @@ public class MinAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn @Override protected MinAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new MinAggregatorFactory(name, type, config, context, parent, subFactoriesBuilder, metaData); + return new MinAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); } @Override @@ -92,7 +90,7 @@ public class MinAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorFactory.java index df914978808..8f5538fb7a2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.min; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -36,9 +35,9 @@ import java.util.Map; public class MinAggregatorFactory extends ValuesSourceAggregatorFactory { - public MinAggregatorFactory(String name, Type type, ValuesSourceConfig config, SearchContext context, + public MinAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java index 08c4d7ec817..db322a8e70e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java @@ -28,7 +28,6 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.HDRPercentileRanksAggregatorFactory; import 
org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestPercentileRanksAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValueType; @@ -47,7 +46,6 @@ import java.util.Objects; public class PercentileRanksAggregationBuilder extends LeafOnly { public static final String NAME = PercentileRanks.TYPE_NAME; - public static final Type TYPE = new Type(NAME); public static final ParseField VALUES_FIELD = new ParseField("values"); @@ -109,14 +107,14 @@ public class PercentileRanksAggregationBuilder extends LeafOnly parent, Builder subFactoriesBuilder) throws IOException { switch (method) { case TDIGEST: - return new TDigestPercentileRanksAggregatorFactory(name, type, config, values, compression, keyed, context, parent, + return new TDigestPercentileRanksAggregatorFactory(name, config, values, compression, keyed, context, parent, subFactoriesBuilder, metaData); case HDR: - return new HDRPercentileRanksAggregatorFactory(name, type, config, values, numberOfSignificantValueDigits, keyed, context, + return new HDRPercentileRanksAggregatorFactory(name, config, values, numberOfSignificantValueDigits, keyed, context, parent, subFactoriesBuilder, metaData); default: throw new IllegalStateException("Illegal method [" + method + "]"); @@ -286,7 +284,7 @@ public class PercentileRanksAggregationBuilder extends LeafOnly { public static final String NAME = Percentiles.TYPE_NAME; - public static final Type TYPE = new Type(NAME); public static final double[] DEFAULT_PERCENTS = new double[] { 1, 5, 25, 50, 75, 95, 99 }; public static final ParseField PERCENTS_FIELD = new ParseField("percents"); @@ -115,14 +113,14 @@ public class PercentilesAggregationBuilder extends LeafOnly parent, Builder subFactoriesBuilder) throws IOException { switch (method) { case TDIGEST: - return new TDigestPercentilesAggregatorFactory(name, type, config, percents, compression, keyed, context, parent, + return new TDigestPercentilesAggregatorFactory(name, config, percents, compression, keyed, context, parent, subFactoriesBuilder, metaData); case HDR: - return new HDRPercentilesAggregatorFactory(name, type, config, percents, numberOfSignificantValueDigits, keyed, context, parent, + return new HDRPercentilesAggregatorFactory(name, config, percents, numberOfSignificantValueDigits, keyed, context, parent, subFactoriesBuilder, metaData); default: throw new IllegalStateException("Illegal method [" + method + "]"); @@ -292,7 +290,7 @@ public class PercentilesAggregationBuilder extends LeafOnly config, double[] values, + public HDRPercentileRanksAggregatorFactory(String name, ValuesSourceConfig config, double[] values, int numberOfSignificantValueDigits, boolean keyed, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.values = values; this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; this.keyed = keyed; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorFactory.java index 852d9175855..1074b6e142d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorFactory.java +++ 
b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -40,10 +39,10 @@ public class HDRPercentilesAggregatorFactory extends ValuesSourceAggregatorFacto private final int numberOfSignificantValueDigits; private final boolean keyed; - public HDRPercentilesAggregatorFactory(String name, Type type, ValuesSourceConfig config, double[] percents, + public HDRPercentilesAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, int numberOfSignificantValueDigits, boolean keyed, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.percents = percents; this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; this.keyed = keyed; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregatorFactory.java index 9894f9ba8af..223d25216bc 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -41,10 +40,10 @@ public class TDigestPercentileRanksAggregatorFactory private final double compression; private final boolean keyed; - public TDigestPercentileRanksAggregatorFactory(String name, Type type, ValuesSourceConfig config, double[] percents, + public TDigestPercentileRanksAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, double compression, boolean keyed, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.percents = percents; this.compression = compression; this.keyed = keyed; diff --git 
a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorFactory.java index f0a7cc93810..47b17d84f3b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -41,10 +40,10 @@ public class TDigestPercentilesAggregatorFactory private final double compression; private final boolean keyed; - public TDigestPercentilesAggregatorFactory(String name, Type type, ValuesSourceConfig config, double[] percents, + public TDigestPercentilesAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, double compression, boolean keyed, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.percents = percents; this.compression = compression; this.keyed = keyed; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java index 68a5138271f..2acd92395e0 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java @@ -34,7 +34,6 @@ import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -45,9 +44,7 @@ import java.util.Set; import java.util.function.Function; public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder { - public static final String NAME = "scripted_metric"; - private static final Type TYPE = new Type(NAME); private static final ParseField INIT_SCRIPT_FIELD = new ParseField("init_script"); private static final ParseField MAP_SCRIPT_FIELD = new ParseField("map_script"); @@ -62,14 +59,14 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder private Map params; public ScriptedMetricAggregationBuilder(String name) { - super(name, TYPE); + super(name); } /** * Read from a stream. 
*/ public ScriptedMetricAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE); + super(in); initScript = in.readOptionalWriteable(Script::new); mapScript = in.readOptionalWriteable(Script::new); combineScript = in.readOptionalWriteable(Script::new); @@ -203,7 +200,7 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder } else { executableCombineScript = (p) -> null; } - return new ScriptedMetricAggregatorFactory(name, type, searchMapScript, executableInitScript, executableCombineScript, reduceScript, + return new ScriptedMetricAggregatorFactory(name, searchMapScript, executableInitScript, executableCombineScript, reduceScript, params, context, parent, subfactoriesBuilder, metaData); } @@ -297,7 +294,7 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder } @Override - public String getWriteableName() { + public String getType() { return NAME; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java index a8d3b7c3e09..bac2becc8e4 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java @@ -26,7 +26,6 @@ import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.internal.SearchContext; @@ -45,11 +44,11 @@ public class ScriptedMetricAggregatorFactory extends AggregatorFactory params; private final Function, ExecutableScript> initScript; - public ScriptedMetricAggregatorFactory(String name, Type type, Function, SearchScript> mapScript, + public ScriptedMetricAggregatorFactory(String name, Function, SearchScript> mapScript, Function, ExecutableScript> initScript, Function, ExecutableScript> combineScript, Script reduceScript, Map params, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { - super(name, type, context, parent, subFactories, metaData); + super(name, context, parent, subFactories, metaData); this.mapScript = mapScript; this.initScript = initScript; this.combineScript = combineScript; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregationBuilder.java index 633626dff2b..390be44d747 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregationBuilder.java @@ -24,10 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; 
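
// Illustration (not part of this patch): client code building a
// scripted_metric aggregation is unaffected by the removal of the Type
// constant; only internal plumbing changed. A hedged usage sketch -- the
// script bodies and field names below are made up for the example:
ScriptedMetricAggregationBuilder profit = new ScriptedMetricAggregationBuilder("profit")
        .initScript(new Script("_agg.transactions = []"))
        .mapScript(new Script("_agg.transactions.add(doc['change'].value)"))
        .combineScript(new Script("double p = 0; for (t in _agg.transactions) { p += t } return p"))
        .reduceScript(new Script("double p = 0; for (a in _aggs) { p += a } return p"));
// profit.getType() now reports "scripted_metric"; before this change the same
// string came from getWriteableName().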
+import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -41,7 +40,6 @@ import java.io.IOException; public class StatsAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "stats"; - private static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -54,14 +52,14 @@ public class StatsAggregationBuilder extends ValuesSourceAggregationBuilder.Leaf } public StatsAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(name, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. */ public StatsAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(in, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @Override @@ -72,7 +70,7 @@ public class StatsAggregationBuilder extends ValuesSourceAggregationBuilder.Leaf @Override protected StatsAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new StatsAggregatorFactory(name, type, config, context, parent, subFactoriesBuilder, metaData); + return new StatsAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); } @Override @@ -91,7 +89,7 @@ public class StatsAggregationBuilder extends ValuesSourceAggregationBuilder.Leaf } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregatorFactory.java index 996416d2a8e..a6e59d7c75b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.stats; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -36,9 +35,9 @@ import java.util.Map; public class StatsAggregatorFactory extends ValuesSourceAggregatorFactory { - public StatsAggregatorFactory(String name, Type type, ValuesSourceConfig config, SearchContext context, + public StatsAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); } @Override diff --git 
a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregationBuilder.java index 75c011fe715..94857c8753f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregationBuilder.java @@ -24,10 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -43,7 +42,6 @@ import java.util.Objects; public class ExtendedStatsAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "extended_stats"; - public static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -59,14 +57,14 @@ public class ExtendedStatsAggregationBuilder private double sigma = 2.0; public ExtendedStatsAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(name, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. 
*/ public ExtendedStatsAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(in, ValuesSourceType.NUMERIC, ValueType.NUMERIC); sigma = in.readDouble(); } @@ -90,7 +88,7 @@ public class ExtendedStatsAggregationBuilder @Override protected ExtendedStatsAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new ExtendedStatsAggregatorFactory(name, type, config, sigma, context, parent, subFactoriesBuilder, metaData); + return new ExtendedStatsAggregatorFactory(name, config, sigma, context, parent, subFactoriesBuilder, metaData); } @Override @@ -111,7 +109,7 @@ public class ExtendedStatsAggregationBuilder } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregatorFactory.java index 6153c108c59..521ea8f68a6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.stats.extended; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -38,10 +37,10 @@ public class ExtendedStatsAggregatorFactory extends ValuesSourceAggregatorFactor private final double sigma; - public ExtendedStatsAggregatorFactory(String name, Type type, ValuesSourceConfig config, double sigma, + public ExtendedStatsAggregatorFactory(String name, ValuesSourceConfig config, double sigma, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); this.sigma = sigma; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregationBuilder.java index 9645610dc8c..7118b14d0cd 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregationBuilder.java @@ -24,10 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import 
org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -41,7 +40,6 @@ import java.io.IOException; public class SumAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "sum"; - private static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -54,14 +52,14 @@ public class SumAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } public SumAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(name, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. */ public SumAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(in, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @Override @@ -72,7 +70,7 @@ public class SumAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn @Override protected SumAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new SumAggregatorFactory(name, type, config, context, parent, subFactoriesBuilder, metaData); + return new SumAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); } @Override @@ -91,7 +89,7 @@ public class SumAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorFactory.java index a7dad9cac7f..8b6103214a7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.sum; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -36,9 +35,9 @@ import java.util.Map; public class SumAggregatorFactory extends ValuesSourceAggregatorFactory { - public SumAggregatorFactory(String name, Type type, ValuesSourceConfig config, SearchContext context, + public SumAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java 
b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java index 68932d65b16..a8ec235c563 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java @@ -34,8 +34,6 @@ import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.search.fetch.StoredFieldsContext; @@ -60,7 +58,6 @@ import java.util.Set; public class TopHitsAggregationBuilder extends AbstractAggregationBuilder { public static final String NAME = "top_hits"; - private static final InternalAggregation.Type TYPE = new Type(NAME); private int from = 0; private int size = 3; @@ -75,14 +72,14 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder scriptFields; private final FetchSourceContext fetchSourceContext; - public TopHitsAggregatorFactory(String name, Type type, int from, int size, boolean explain, boolean version, boolean trackScores, + public TopHitsAggregatorFactory(String name, int from, int size, boolean explain, boolean version, boolean trackScores, Optional sort, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext, List docValueFields, List scriptFields, FetchSourceContext fetchSourceContext, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { - super(name, type, context, parent, subFactories, metaData); + super(name, context, parent, subFactories, metaData); this.from = from; this.size = size; this.explain = explain; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregationBuilder.java index fdd83d32aa0..50916b4063c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregationBuilder.java @@ -27,7 +27,6 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -40,7 +39,6 @@ import java.io.IOException; public class ValueCountAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "value_count"; - public static final Type TYPE = new Type(NAME); private static final ObjectParser PARSER; static { @@ -53,14 +51,14 @@ public 
class ValueCountAggregationBuilder extends ValuesSourceAggregationBuilder } public ValueCountAggregationBuilder(String name, ValueType targetValueType) { - super(name, TYPE, ValuesSourceType.ANY, targetValueType); + super(name, ValuesSourceType.ANY, targetValueType); } /** * Read from a stream. */ public ValueCountAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.ANY); + super(in, ValuesSourceType.ANY); } @Override @@ -76,7 +74,7 @@ public class ValueCountAggregationBuilder extends ValuesSourceAggregationBuilder @Override protected ValueCountAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder) throws IOException { - return new ValueCountAggregatorFactory(name, type, config, context, parent, subFactoriesBuilder, metaData); + return new ValueCountAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); } @Override @@ -95,7 +93,7 @@ public class ValueCountAggregationBuilder extends ValuesSourceAggregationBuilder } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorFactory.java index 2dc40e1d8ea..80c8001b93c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.valuecount; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -35,9 +34,9 @@ import java.util.Map; public class ValueCountAggregatorFactory extends ValuesSourceAggregatorFactory { - public ValueCountAggregatorFactory(String name, Type type, ValuesSourceConfig config, SearchContext context, + public ValueCountAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, context, parent, subFactoriesBuilder, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 859fbb12eb1..45b6afe43ee 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -27,7 +27,6 @@ import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import 
org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import org.joda.time.DateTimeZone; @@ -40,28 +39,29 @@ public abstract class ValuesSourceAggregationBuilder> extends ValuesSourceAggregationBuilder { - protected LeafOnly(String name, Type type, ValuesSourceType valuesSourceType, ValueType targetValueType) { - super(name, type, valuesSourceType, targetValueType); + protected LeafOnly(String name, ValuesSourceType valuesSourceType, ValueType targetValueType) { + super(name, valuesSourceType, targetValueType); } /** * Read an aggregation from a stream that does not serialize its targetValueType. This should be used by most subclasses. */ - protected LeafOnly(StreamInput in, Type type, ValuesSourceType valuesSourceType, ValueType targetValueType) throws IOException { - super(in, type, valuesSourceType, targetValueType); + protected LeafOnly(StreamInput in, ValuesSourceType valuesSourceType, ValueType targetValueType) throws IOException { + super(in, valuesSourceType, targetValueType); } /** * Read an aggregation from a stream that serializes its targetValueType. This should only be used by subclasses that override * {@link #serializeTargetValueType()} to return true. */ - protected LeafOnly(StreamInput in, Type type, ValuesSourceType valuesSourceType) throws IOException { - super(in, type, valuesSourceType); + protected LeafOnly(StreamInput in, ValuesSourceType valuesSourceType) throws IOException { + super(in, valuesSourceType); } @Override public AB subAggregations(Builder subFactories) { - throw new AggregationInitializationException("Aggregator [" + name + "] of type [" + type + "] cannot accept sub-aggregations"); + throw new AggregationInitializationException("Aggregator [" + name + "] of type [" + + getType() + "] cannot accept sub-aggregations"); } } @@ -75,8 +75,8 @@ public abstract class ValuesSourceAggregationBuilder config; - protected ValuesSourceAggregationBuilder(String name, Type type, ValuesSourceType valuesSourceType, ValueType targetValueType) { - super(name, type); + protected ValuesSourceAggregationBuilder(String name, ValuesSourceType valuesSourceType, ValueType targetValueType) { + super(name); if (valuesSourceType == null) { throw new IllegalArgumentException("[valuesSourceType] must not be null: [" + name + "]"); } @@ -87,9 +87,9 @@ public abstract class ValuesSourceAggregationBuilder config; - public ValuesSourceAggregatorFactory(String name, Type type, ValuesSourceConfig config, SearchContext context, + public ValuesSourceAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, context, parent, subFactoriesBuilder, metaData); + super(name, context, parent, subFactoriesBuilder, metaData); this.config = config; } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java index 2015ac2b6e1..7ff2147868a 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java @@ -34,6 +34,8 @@ import java.util.Arrays; import java.util.List; import 
java.util.Objects;

+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+
 /**
  * A field highlighted with its highlighted fragments.
  */
@@ -121,13 +123,16 @@ public class HighlightField implements ToXContent, Streamable {
     }

     public static HighlightField fromXContent(XContentParser parser) throws IOException {
-        XContentParser.Token token = parser.nextToken();
-        assert token == XContentParser.Token.FIELD_NAME;
+        ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
         String fieldName = parser.currentName();
         Text[] fragments = null;
-        token = parser.nextToken();
+        XContentParser.Token token = parser.nextToken();
         if (token == XContentParser.Token.START_ARRAY) {
-            fragments = parseValues(parser);
+            List<Text> values = new ArrayList<>();
+            while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+                values.add(new Text(parser.text()));
+            }
+            fragments = values.toArray(new Text[values.size()]);
         } else if (token == XContentParser.Token.VALUE_NULL) {
             fragments = null;
         } else {
@@ -137,14 +142,6 @@
         return new HighlightField(fieldName, fragments);
     }

-    private static Text[] parseValues(XContentParser parser) throws IOException {
-        List<Text> values = new ArrayList<>();
-        while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
-            values.add(new Text(parser.text()));
-        }
-        return values.toArray(new Text[values.size()]);
-    }
-
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.field(name);
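
// Illustration (not part of this patch): a minimal round trip through the new
// HighlightField.fromXContent. The parser-creation call below assumes the 5.x
// XContent API and may need a NamedXContentRegistry argument on newer
// branches; the JSON and field name are made up for the example.
String json = "{\"title\":[\"some <em>highlighted</em> fragment\"]}";
try (XContentParser parser = XContentType.JSON.xContent().createParser(json)) {
    parser.nextToken(); // START_OBJECT
    parser.nextToken(); // FIELD_NAME -- fromXContent expects to be positioned here
    HighlightField field = HighlightField.fromXContent(parser);
    assert "title".equals(field.getName());
    assert field.fragments().length == 1;
}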
diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
index 5784c31d99c..c923ee9dd0a 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
@@ -23,6 +23,7 @@ import org.apache.lucene.search.Explanation;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressorFactory;
@@ -35,6 +36,9 @@ import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.SourceFieldMapper;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHitField;
@@ -45,6 +49,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -58,6 +63,10 @@
 import static org.elasticsearch.common.lucene.Lucene.readExplanation;
 import static org.elasticsearch.common.lucene.Lucene.writeExplanation;
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.parseStoredFieldsValue;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;
 import static org.elasticsearch.search.fetch.subphase.highlight.HighlightField.readHighlightField;
 import static org.elasticsearch.search.internal.InternalSearchHitField.readSearchHitField;
@@ -65,7 +74,8 @@
 public class InternalSearchHit implements SearchHit {

     private transient int docId;

-    private float score = Float.NEGATIVE_INFINITY;
+    private static final float DEFAULT_SCORE = Float.NEGATIVE_INFINITY;
+    private float score = DEFAULT_SCORE;

     private Text id;
     private Text type;
@@ -89,6 +99,8 @@
     @Nullable
     private SearchShardTarget shard;

+    private transient String index;
+
     private Map<String, Object> sourceAsMap;
     private byte[] sourceAsBytes;
@@ -103,20 +115,17 @@
     }

     public InternalSearchHit(int docId, String id, Text type, Map<String, SearchHitField> fields) {
-        this.docId = docId;
+        this(docId, id, type, null, fields);
+    }
+
+    public InternalSearchHit(int nestedTopDocId, String id, Text type, InternalNestedIdentity nestedIdentity, Map<String, SearchHitField> fields) {
+        this.docId = nestedTopDocId;
         if (id != null) {
             this.id = new Text(id);
         } else {
             this.id = null;
         }
         this.type = type;
-        this.fields = fields;
-    }
-
-    public InternalSearchHit(int nestedTopDocId, String id, Text type, InternalNestedIdentity nestedIdentity, Map<String, SearchHitField> fields) {
-        this.docId = nestedTopDocId;
-        this.id = new Text(id);
-        this.type = type;
         this.nestedIdentity = nestedIdentity;
         this.fields = fields;
     }
@@ -125,15 +134,6 @@
         return this.docId;
     }

-    public void shardTarget(SearchShardTarget shardTarget) {
-        this.shard = shardTarget;
-        if (innerHits != null) {
-            for (InternalSearchHits searchHits : innerHits.values()) {
-                searchHits.shardTarget(shardTarget);
-            }
-        }
-    }
-
     public void score(float score) {
         this.score = score;
     }
@@ -164,7 +164,7 @@
     @Override
     public String index() {
-        return shard.index();
+        return this.index;
     }

     @Override
@@ -229,14 +229,6 @@
         return sourceRef();
     }

-    /**
-     * Internal source representation, might be compressed....
-     */
-    public BytesReference internalSourceRef() {
-        return source;
-    }
-
-    @Override
     public byte[] source() {
         if (source == null) {
@@ -318,10 +310,6 @@
         this.fields = fields;
     }

-    public Map<String, HighlightField> internalHighlightFields() {
-        return highlightFields;
-    }
-
     @Override
     public Map<String, HighlightField> highlightFields() {
         return highlightFields == null ?
emptyMap() : highlightFields; @@ -337,7 +325,11 @@ public class InternalSearchHit implements SearchHit { } public void sortValues(Object[] sortValues, DocValueFormat[] sortValueFormats) { - this.sortValues = new SearchSortValues(sortValues, sortValueFormats); + sortValues(new SearchSortValues(sortValues, sortValueFormats)); + } + + public void sortValues(SearchSortValues sortValues) { + this.sortValues = sortValues; } @Override @@ -376,6 +368,9 @@ public class InternalSearchHit implements SearchHit { public void shard(SearchShardTarget target) { this.shard = target; + if (target != null) { + this.index = target.getIndex(); + } } public void matchedQueries(String[] matchedQueries) { @@ -417,6 +412,8 @@ public class InternalSearchHit implements SearchHit { static final String DESCRIPTION = "description"; static final String DETAILS = "details"; static final String INNER_HITS = "inner_hits"; + static final String _SHARD = "_shard"; + static final String _NODE = "_node"; } // public because we render hit as part of completion suggestion option @@ -439,14 +436,14 @@ public class InternalSearchHit implements SearchHit { // For inner_hit hits shard is null and that is ok, because the parent search hit has all this information. // Even if this was included in the inner_hit hits this would be the same, so better leave it out. if (explanation() != null && shard != null) { - builder.field("_shard", shard.shardId()); - builder.field("_node", shard.nodeIdText()); + builder.field(Fields._SHARD, shard.getShardId()); + builder.field(Fields._NODE, shard.getNodeIdText()); } if (nestedIdentity != null) { nestedIdentity.toXContent(builder, params); } else { - if (shard != null) { - builder.field(Fields._INDEX, shard.indexText()); + if (index != null) { + builder.field(Fields._INDEX, index); } if (type != null) { builder.field(Fields._TYPE, type); @@ -468,7 +465,7 @@ public class InternalSearchHit implements SearchHit { builder.field(field.name(), value); } if (source != null) { - XContentHelper.writeRawField("_source", source, builder, params); + XContentHelper.writeRawField(SourceFieldMapper.NAME, source, builder, params); } if (!otherFields.isEmpty()) { builder.startObject(Fields.FIELDS); @@ -512,6 +509,160 @@ public class InternalSearchHit implements SearchHit { return builder; } + public static InternalSearchHit fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + String currentFieldName = null; + String type = null, id = null; + String index = null; + float score = DEFAULT_SCORE; + long version = -1; + SearchSortValues sortValues = SearchSortValues.EMPTY; + InternalNestedIdentity nestedIdentity = null; + Map highlightFields = new HashMap<>(); + BytesReference parsedSource = null; + List matchedQueries = new ArrayList<>(); + Map fields = new HashMap<>(); + Explanation explanation = null; + ShardId shardId = null; + String nodeId = null; + Map innerHits = null; + while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (Fields._TYPE.equals(currentFieldName)) { + type = parser.text(); + } else if (Fields._INDEX.equals(currentFieldName)) { + index = parser.text(); + } else if (Fields._ID.equals(currentFieldName)) { + id = parser.text(); + } else if (Fields._SCORE.equals(currentFieldName)) { + score = parser.floatValue(); + } 
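// Illustration (not part of this patch): this dispatch loop walks the fields
// of a single hit object, e.g.
//     {"_index": "twitter", "_type": "tweet", "_id": "1", "_score": 1.0, ...}
// where the index/type/id/score values are made up for the example. The
// well-known metadata keys are captured into local variables above, while any
// other single-valued metadata field falls through to the
// MapperService.isMetadataField(...) branch below and is collected as a
// stored field.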
else if (Fields._VERSION.equals(currentFieldName)) { + version = parser.longValue(); + } else if (Fields._SHARD.equals(currentFieldName)) { + shardId = ShardId.fromString(parser.text()); + } else if (Fields._NODE.equals(currentFieldName)) { + nodeId = parser.text(); + } else if (MapperService.isMetadataField(currentFieldName)) { + List values = new ArrayList<>(); + values.add(parseStoredFieldsValue(parser)); + fields.put(currentFieldName, new InternalSearchHitField(currentFieldName, values)); + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.VALUE_NULL) { + if (Fields._SCORE.equals(currentFieldName)) { + score = Float.NaN; + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (SourceFieldMapper.NAME.equals(currentFieldName)) { + try (XContentBuilder builder = XContentBuilder.builder(parser.contentType().xContent())) { + //the original document gets slightly modified: whitespaces or pretty printing are not preserved, + //it all depends on the current builder settings + builder.copyCurrentStructure(parser); + parsedSource = builder.bytes(); + } + } else if (Fields.HIGHLIGHT.equals(currentFieldName)) { + while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + HighlightField highlightField = HighlightField.fromXContent(parser); + highlightFields.put(highlightField.getName(), highlightField); + } + } else if (Fields.FIELDS.equals(currentFieldName)) { + while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + String fieldName = parser.currentName(); + List values = new ArrayList<>(); + ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); + while((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + values.add(parseStoredFieldsValue(parser)); + } + fields.put(fieldName, new InternalSearchHitField(fieldName, values)); + } + } else if (Fields._EXPLANATION.equals(currentFieldName)) { + explanation = parseExplanation(parser); + } else if (Fields.INNER_HITS.equals(currentFieldName)) { + innerHits = new HashMap<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + // parse the key + ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); + String name = parser.currentName(); + innerHits.put(name, InternalSearchHits.fromXContent(parser)); + parser.nextToken(); + ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.currentToken(), parser::getTokenLocation); + } + } else if (InternalNestedIdentity.Fields._NESTED.equals(currentFieldName)) { + nestedIdentity = InternalNestedIdentity.fromXContent(parser); + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (Fields.SORT.equals(currentFieldName)) { + sortValues = SearchSortValues.fromXContent(parser); + } else if (Fields.MATCHED_QUERIES.equals(currentFieldName)) { + while((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + matchedQueries.add(parser.text()); + } + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else { + throwUnknownToken(token, parser.getTokenLocation()); + } + } + InternalSearchHit internalSearchHit = new InternalSearchHit(-1, id, new Text(type), nestedIdentity, Collections.emptyMap()); + internalSearchHit.index = index; + internalSearchHit.score(score); + 
internalSearchHit.version(version); + internalSearchHit.sortValues(sortValues); + internalSearchHit.highlightFields(highlightFields); + internalSearchHit.sourceRef(parsedSource); + internalSearchHit.explanation(explanation); + internalSearchHit.setInnerHits(innerHits); + if (matchedQueries.size() > 0) { + internalSearchHit.matchedQueries(matchedQueries.toArray(new String[matchedQueries.size()])); + } + if (shardId != null && nodeId != null) { + internalSearchHit.shard(new SearchShardTarget(nodeId, shardId)); + } + internalSearchHit.fields(fields); + return internalSearchHit; + } + + private static Explanation parseExplanation(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + XContentParser.Token token; + Float value = null; + String description = null; + List details = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, () -> parser.getTokenLocation()); + String currentFieldName = parser.currentName(); + token = parser.nextToken(); + if (Fields.VALUE.equals(currentFieldName)) { + value = parser.floatValue(); + } else if (Fields.DESCRIPTION.equals(currentFieldName)) { + description = parser.textOrNull(); + } else if (Fields.DETAILS.equals(currentFieldName)) { + ensureExpectedToken(XContentParser.Token.START_ARRAY, token, () -> parser.getTokenLocation()); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + details.add(parseExplanation(parser)); + } + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } + if (value == null) { + throw new ParsingException(parser.getTokenLocation(), "missing explanation value"); + } + if (description == null) { + throw new ParsingException(parser.getTokenLocation(), "missing explanation description"); + } + return Explanation.match(value, description, details); + } + private void buildExplanation(XContentBuilder builder, Explanation explanation) throws IOException { builder.startObject(); builder.field(Fields.VALUE, explanation.getValue()); @@ -595,7 +746,8 @@ public class InternalSearchHit implements SearchHit { matchedQueries[i] = in.readString(); } } - shard = in.readOptionalWriteable(SearchShardTarget::new); + // we call the setter here because that also sets the local index parameter + shard(in.readOptionalWriteable(SearchShardTarget::new)); size = in.readVInt(); if (size > 0) { innerHits = new HashMap<>(size); @@ -762,5 +914,4 @@ public class InternalSearchHit implements SearchHit { static final String _NESTED_OFFSET = "offset"; } } - } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java index 9b82c8783a1..f4010ee1927 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java @@ -22,14 +22,19 @@ package org.elasticsearch.search.internal; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; import java.io.IOException; +import java.util.ArrayList; import 
java.util.Arrays; import java.util.Iterator; +import java.util.List; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; import static org.elasticsearch.search.internal.InternalSearchHit.readSearchHit; public class InternalSearchHits implements SearchHits { @@ -132,6 +137,44 @@ public class InternalSearchHits implements SearchHits { return builder; } + public static InternalSearchHits fromXContent(XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + parser.nextToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + } + XContentParser.Token token = parser.currentToken(); + String currentFieldName = null; + List hits = new ArrayList<>(); + long totalHits = 0; + float maxScore = 0f; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (Fields.TOTAL.equals(currentFieldName)) { + totalHits = parser.longValue(); + } else if (Fields.MAX_SCORE.equals(currentFieldName)) { + maxScore = parser.floatValue(); + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.VALUE_NULL) { + if (Fields.MAX_SCORE.equals(currentFieldName)) { + maxScore = Float.NaN; // NaN gets rendered as null-field + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + hits.add(InternalSearchHit.fromXContent(parser)); + } + } + } + InternalSearchHits internalSearchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, + maxScore); + return internalSearchHits; + } + public static InternalSearchHits readSearchHits(StreamInput in) throws IOException { InternalSearchHits hits = new InternalSearchHits(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchSortValues.java b/core/src/main/java/org/elasticsearch/search/internal/SearchSortValues.java index a4fcb18f828..9aa29d7768f 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchSortValues.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchSortValues.java @@ -139,9 +139,7 @@ public class SearchSortValues implements ToXContent, Writeable { } public static SearchSortValues fromXContent(XContentParser parser) throws IOException { - XContentParserUtils.ensureFieldName(parser, parser.currentToken(), Fields.SORT); - XContentParser.Token token = parser.nextToken(); - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, token, parser::getTokenLocation); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.currentToken(), parser::getTokenLocation); return new SearchSortValues(parser.list().toArray()); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index d6598eb3a12..6b42d178694 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -27,13 +27,15 @@ import org.elasticsearch.ExceptionsHelper; import 
org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -43,7 +45,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.engine.SnapshotFailedEngineException; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; @@ -68,7 +69,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; @@ -105,8 +105,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private volatile Map shardSnapshots = emptyMap(); - private final BlockingQueue updatedSnapshotStateQueue = ConcurrentCollections.newBlockingQueue(); - + private final SnapshotStateExecutor snapshotStateExecutor = new SnapshotStateExecutor(); @Inject public SnapshotShardsService(Settings settings, ClusterService clusterService, SnapshotsService snapshotsService, ThreadPool threadPool, @@ -458,8 +457,6 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private ShardId shardId; private ShardSnapshotStatus status; - private volatile boolean processed; // state field, no need to serialize - public UpdateIndexShardSnapshotStatusRequest() { } @@ -502,14 +499,6 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements public String toString() { return "" + snapshot + ", shardId [" + shardId + "], status [" + status.state() + "]"; } - - public void markAsProcessed() { - processed = true; - } - - public boolean isProcessed() { - return processed; - } } /** @@ -531,83 +520,65 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements */ private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request) { logger.trace("received updated snapshot restore state [{}]", request); - updatedSnapshotStateQueue.add(request); + clusterService.submitStateUpdateTask( + "update snapshot state", + request, + ClusterStateTaskConfig.build(Priority.NORMAL), + snapshotStateExecutor, + (source, e) -> logger.warn((Supplier) () -> new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", + request.snapshot(), request.shardId(), request.status()), e)); + } - clusterService.submitStateUpdateTask("update snapshot state", new ClusterStateUpdateTask() { - private final List 
drainedRequests = new ArrayList<>(); + class SnapshotStateExecutor implements ClusterStateTaskExecutor<UpdateIndexShardSnapshotStatusRequest> { - @Override - public ClusterState execute(ClusterState currentState) { - // The request was already processed as a part of an early batch - skipping - if (request.isProcessed()) { - return currentState; - } + @Override + public ClusterTasksResult<UpdateIndexShardSnapshotStatusRequest> execute(ClusterState currentState, List<UpdateIndexShardSnapshotStatusRequest> tasks) throws Exception { + final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); + if (snapshots != null) { + int changedCount = 0; + final List<SnapshotsInProgress.Entry> entries = new ArrayList<>(); + for (SnapshotsInProgress.Entry entry : snapshots.entries()) { + ImmutableOpenMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableOpenMap.builder(); + boolean updated = false; - updatedSnapshotStateQueue.drainTo(drainedRequests); - - final int batchSize = drainedRequests.size(); - - // nothing to process (a previous event has processed it already) - if (batchSize == 0) { - return currentState; - } - - final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - if (snapshots != null) { - int changedCount = 0; - final List<SnapshotsInProgress.Entry> entries = new ArrayList<>(); - for (SnapshotsInProgress.Entry entry : snapshots.entries()) { - ImmutableOpenMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableOpenMap.builder(); - boolean updated = false; - - for (int i = 0; i < batchSize; i++) { - final UpdateIndexShardSnapshotStatusRequest updateSnapshotState = drainedRequests.get(i); - updateSnapshotState.markAsProcessed(); - - if (entry.snapshot().equals(updateSnapshotState.snapshot())) { - logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot(), updateSnapshotState.shardId(), updateSnapshotState.status().state()); - if (updated == false) { - shards.putAll(entry.shards()); - updated = true; - } - shards.put(updateSnapshotState.shardId(), updateSnapshotState.status()); - changedCount++; + for (UpdateIndexShardSnapshotStatusRequest updateSnapshotState : tasks) { + if (entry.snapshot().equals(updateSnapshotState.snapshot())) { + logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot(), updateSnapshotState.shardId(), updateSnapshotState.status().state()); + if (updated == false) { + shards.putAll(entry.shards()); + updated = true; } + shards.put(updateSnapshotState.shardId(), updateSnapshotState.status()); + changedCount++; } + } - if (updated) { - if (completed(shards.values()) == false) { - entries.add(new SnapshotsInProgress.Entry(entry, shards.build())); - } else { - // Snapshot is finished - mark it as done - // TODO: Add PARTIAL_SUCCESS status? - SnapshotsInProgress.Entry updatedEntry = new SnapshotsInProgress.Entry(entry, State.SUCCESS, shards.build()); - entries.add(updatedEntry); - // Finalize snapshot in the repository - snapshotsService.endSnapshot(updatedEntry); - logger.info("snapshot [{}] is done", updatedEntry.snapshot()); - } + if (updated) { + if (completed(shards.values()) == false) { + entries.add(new SnapshotsInProgress.Entry(entry, shards.build())); } else { - entries.add(entry); + // Snapshot is finished - mark it as done + // TODO: Add PARTIAL_SUCCESS status?
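+ // (a PARTIAL_SUCCESS state would let callers tell apart snapshots where
+ // some shards failed; until then such snapshots are marked SUCCESS below,
+ // with the individual shard statuses preserved in the rebuilt entry)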
+ SnapshotsInProgress.Entry updatedEntry = new SnapshotsInProgress.Entry(entry, State.SUCCESS, shards.build()); + entries.add(updatedEntry); + // Finalize snapshot in the repository + snapshotsService.endSnapshot(updatedEntry); + logger.info("snapshot [{}] is done", updatedEntry.snapshot()); } - } - if (changedCount > 0) { - logger.trace("changed cluster state triggered by {} snapshot state updates", changedCount); - - final SnapshotsInProgress updatedSnapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updatedSnapshots).build(); + } else { + entries.add(entry); } } - return currentState; - } + if (changedCount > 0) { + logger.trace("changed cluster state triggered by {} snapshot state updates", changedCount); - @Override - public void onFailure(String source, Exception e) { - for (UpdateIndexShardSnapshotStatusRequest request : drainedRequests) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", request.snapshot(), request.shardId(), request.status()), e); + final SnapshotsInProgress updatedSnapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); + return ClusterTasksResult.builder().successes(tasks).build( + ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updatedSnapshots).build()); } } - }); + return ClusterTasksResult.builder().successes(tasks).build(currentState); + } } /** diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index c2f0832b75e..f2b29706caf 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -477,8 +477,10 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i @Override public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) throws IOException { + boolean success = false; + NodeChannels nodeChannels = null; try { - NodeChannels nodeChannels = connectToChannels(node, connectionProfile); + nodeChannels = connectToChannels(node, connectionProfile); final Channel channel = nodeChannels.getChannels().get(0); // one channel is guaranteed by the connection profile final TimeValue connectTimeout = connectionProfile.getConnectTimeout() == null ? defaultConnectionProfile.getConnectTimeout() : @@ -487,13 +489,19 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i connectTimeout : connectionProfile.getHandshakeTimeout(); final Version version = executeHandshake(node, channel, handshakeTimeout); transportServiceAdapter.onConnectionOpened(node); - return new NodeChannels(nodeChannels, version); // clone the channels - we now have the correct version + nodeChannels = new NodeChannels(nodeChannels, version);// clone the channels - we now have the correct version + success = true; + return nodeChannels; } catch (ConnectTransportException e) { throw e; } catch (Exception e) { // ConnectTransportExceptions are handled specifically on the caller end - we wrap the actual exception to ensure // only relevant exceptions are logged on the caller end.. 
this is the same as in connectToNode throw new ConnectTransportException(node, "general node connection failure", e); + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(nodeChannels); + } } } @@ -832,7 +840,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } @Override - protected final void doClose() { + protected void doClose() { } @Override diff --git a/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java index 1bc895095ba..adab07f52a3 100644 --- a/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; @@ -41,9 +40,8 @@ import java.util.Collections; import java.util.List; public class ExplainRequestTests extends ESTestCase { + private NamedWriteableRegistry namedWriteableRegistry; - protected NamedWriteableRegistry namedWriteableRegistry; - protected SearchRequestParsers searchRequestParsers; public void setUp() throws Exception { super.setUp(); IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); @@ -52,10 +50,8 @@ public class ExplainRequestTests extends ESTestCase { entries.addAll(indicesModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); namedWriteableRegistry = new NamedWriteableRegistry(entries); - searchRequestParsers = searchModule.getSearchRequestParsers(); } - public void testSerialize() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { ExplainRequest request = new ExplainRequest("index", "type", "id"); diff --git a/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java index c1d18146a08..55bc37df18a 100644 --- a/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; @@ -42,9 +41,8 @@ import java.util.Collections; import java.util.List; public class ShardValidateQueryRequestTests extends ESTestCase { - protected NamedWriteableRegistry namedWriteableRegistry; - protected SearchRequestParsers searchRequestParsers; + public void setUp() throws Exception { super.setUp(); IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); @@ -53,10 +51,8 @@ public class ShardValidateQueryRequestTests extends ESTestCase { entries.addAll(indicesModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); namedWriteableRegistry = new NamedWriteableRegistry(entries); - searchRequestParsers = searchModule.getSearchRequestParsers(); } - public void testSerialize() 
throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices"); diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index 1d1532c4919..a7225296268 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -147,7 +147,7 @@ public class IndexRequestTests extends ESTestCase { assertEquals("IndexResponse[index=" + shardId.getIndexName() + ",type=" + type + ",id="+ id + ",version=" + version + ",result=" + (created ? "created" : "updated") + ",seqNo=" + SequenceNumbersService.UNASSIGNED_SEQ_NO + - ",shards={\"_shards\":{\"total\":" + total + ",\"successful\":" + successful + ",\"failed\":0}}]", + ",shards={\"total\":" + total + ",\"successful\":" + successful + ",\"failed\":0}]", indexResponse.toString()); } } diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java new file mode 100644 index 00000000000..326b44116f8 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.index; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.RandomObjects; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; + +public class IndexResponseTests extends ESTestCase { + + public void testToXContent() throws IOException { + { + IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "type", "id", 3, 5, true); + String output = Strings.toString(indexResponse); + assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":5,\"result\":\"created\",\"_shards\":null," + + "\"_seq_no\":3,\"created\":true}", output); + } + { + IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "type", "id", -1, 7, true); + indexResponse.setForcedRefresh(true); + indexResponse.setShardInfo(new ReplicationResponse.ShardInfo(10, 5)); + String output = Strings.toString(indexResponse); + assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":7,\"result\":\"created\"," + + "\"forced_refresh\":true,\"_shards\":{\"total\":10,\"successful\":5,\"failed\":0},\"created\":true}", output); + } + } + + public void testToAndFromXContent() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + + // Create a random IndexResponse and convert it to XContent bytes + IndexResponse indexResponse = randomIndexResponse(); + BytesReference indexResponseBytes = toXContent(indexResponse, xContentType); + + // Parse the XContent bytes to obtain a parsed IndexResponse + IndexResponse parsedIndexResponse; + try (XContentParser parser = createParser(xContentType.xContent(), indexResponseBytes)) { + parsedIndexResponse = IndexResponse.fromXContent(parser); + assertNull(parser.nextToken()); + } + + // We can't use equals() to compare the original and the parsed index response + // because the random index response can contain shard failures with exceptions, + // and those exceptions are not parsed back with the same types. + + // Print the parsed object out and test that the output is the same as the original output + BytesReference parsedIndexResponseBytes = toXContent(parsedIndexResponse, xContentType); + try (XContentParser parser = createParser(xContentType.xContent(), parsedIndexResponseBytes)) { + assertIndexResponse(indexResponse, parser.map()); + } + }
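Parsing is now symmetric with rendering, which is what a REST client needs to turn a raw reply back into an IndexResponse. A minimal sketch (a hypothetical test method, not part of the change; the JSON mirrors the output asserted in testToXContent above, and BytesArray is assumed to be imported as in the other response tests):

-------------------------------------------------
public void testFromXContentSketch() throws IOException {
    String json = "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":5,"
            + "\"result\":\"created\",\"_shards\":{\"total\":2,\"successful\":2,\"failed\":0},\"created\":true}";
    try (XContentParser parser = createParser(XContentType.JSON.xContent(), new BytesArray(json))) {
        IndexResponse response = IndexResponse.fromXContent(parser);
        assertEquals("index", response.getIndex());
        assertEquals(5L, response.getVersion());
        assertEquals(2, response.getShardInfo().getTotal());
    }
}
-------------------------------------------------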
+ + private static void assertIndexResponse(IndexResponse expected, Map<String, Object> actual) { + assertEquals(expected.getIndex(), actual.get("_index")); + assertEquals(expected.getType(), actual.get("_type")); + assertEquals(expected.getId(), actual.get("_id")); + assertEquals(expected.getVersion(), ((Integer) actual.get("_version")).longValue()); + assertEquals(expected.getResult().getLowercase(), actual.get("result")); + if (expected.forcedRefresh()) { + assertTrue((Boolean) actual.get("forced_refresh")); + } else { + assertFalse(actual.containsKey("forced_refresh")); + } + if (expected.getSeqNo() >= 0) { + assertEquals(expected.getSeqNo(), ((Integer) actual.get("_seq_no")).longValue()); + } else { + assertFalse(actual.containsKey("_seq_no")); + } + + Map<String, Object> actualShards = (Map<String, Object>) actual.get("_shards"); + assertNotNull(actualShards); + assertEquals(expected.getShardInfo().getTotal(), actualShards.get("total")); + assertEquals(expected.getShardInfo().getSuccessful(), actualShards.get("successful")); + assertEquals(expected.getShardInfo().getFailed(), actualShards.get("failed")); + + List<Map<String, Object>> actualFailures = (List<Map<String, Object>>) actualShards.get("failures"); + if (CollectionUtils.isEmpty(expected.getShardInfo().getFailures())) { + assertNull(actualFailures); + } else { + assertEquals(expected.getShardInfo().getFailures().length, actualFailures.size()); + for (int i = 0; i < expected.getShardInfo().getFailures().length; i++) { + ReplicationResponse.ShardInfo.Failure failure = expected.getShardInfo().getFailures()[i]; + Map<String, Object> actualFailure = actualFailures.get(i); + + assertEquals(failure.index(), actualFailure.get("_index")); + assertEquals(failure.shardId(), actualFailure.get("_shard")); + assertEquals(failure.nodeId(), actualFailure.get("_node")); + assertEquals(failure.status(), RestStatus.valueOf((String) actualFailure.get("status"))); + assertEquals(failure.primary(), actualFailure.get("primary")); + + Throwable cause = failure.getCause(); + Map<String, Object> actualClause = (Map<String, Object>) actualFailure.get("reason"); + assertNotNull(actualClause); + while (cause != null) { + // The expected IndexResponse has been converted to XContent, then the resulting bytes have been + // parsed to create a new parsed IndexResponse. During this process, the types of the exceptions + // have been lost. + assertEquals("exception", actualClause.get("type")); + String expectedMessage = "Elasticsearch exception [type=" + ElasticsearchException.getExceptionName(cause) + + ", reason=" + cause.getMessage() + "]"; + assertEquals(expectedMessage, actualClause.get("reason")); + + if (cause instanceof ElasticsearchException) { + ElasticsearchException ex = (ElasticsearchException) cause; + Map<String, Object> actualHeaders = (Map<String, Object>) actualClause.get("header"); + + // When an IndexResponse is converted to XContent, the exception headers that start with "es." + // are added to the XContent as fields with the prefix removed. Other headers are added under + // a "header" root object. + // In the test, the "es." prefix is lost when the XContent is generated, so when the parsed + // IndexResponse is converted back to XContent, all exception headers are under the "header" object.
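+ // e.g. a header stored as "es.test" is first rendered as a field named "test";
+ // the parsed response keeps it as a plain header, so rendering it again nests
+ // "test" under the "header" object like any other custom header.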
+ for (String name : ex.getHeaderKeys()) { + assertEquals(ex.getHeader(name).get(0), actualHeaders.get(name.replaceFirst("es.", ""))); + } + } + actualClause = (Map<String, Object>) actualClause.get("caused_by"); + cause = cause.getCause(); + } + } + } + } + + private static IndexResponse randomIndexResponse() { + ShardId shardId = new ShardId(randomAsciiOfLength(5), randomAsciiOfLength(5), randomIntBetween(0, 5)); + String type = randomAsciiOfLength(5); + String id = randomAsciiOfLength(5); + long seqNo = randomIntBetween(-2, 5); + long version = (long) randomIntBetween(0, 5); + boolean created = randomBoolean(); + + IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, version, created); + indexResponse.setForcedRefresh(randomBoolean()); + indexResponse.setShardInfo(RandomObjects.randomShardInfo(random(), randomBoolean())); + return indexResponse; + } + +} diff --git a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index a5bd3df4d63..c4eac65c212 100644 --- a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.search.RestMultiSearchAction; -import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.FakeRestRequest; @@ -163,7 +162,7 @@ public class MultiSearchRequestTests extends ESTestCase { private MultiSearchRequest parseMultiSearchRequest(String sample) throws IOException { byte[] data = StreamsUtils.copyToBytesFromClasspath(sample); RestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(data)).build(); - return RestMultiSearchAction.parseRequest(restRequest, true, new SearchRequestParsers(), ParseFieldMatcher.EMPTY); + return RestMultiSearchAction.parseRequest(restRequest, true, ParseFieldMatcher.EMPTY); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java index 658853f9598..0972a91c8ec 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java @@ -20,17 +20,17 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; -import org.elasticsearch.index.shard.IndexShardRecoveringException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.test.RandomObjects; import java.io.IOException; import java.util.ArrayList; @@
-42,6 +42,7 @@ import java.util.Set; import java.util.function.Supplier; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.instanceOf; public class ReplicationResponseTests extends ESTestCase { @@ -64,12 +65,13 @@ public class ReplicationResponseTests extends ESTestCase { new ReplicationResponse.ShardInfo(shardInfo.getTotal(), shardInfo.getSuccessful() + 1, shardInfo.getFailures())); mutations.add(() -> { int nbFailures = randomIntBetween(1, 5); - return new ReplicationResponse.ShardInfo(shardInfo.getTotal(), shardInfo.getSuccessful(), randomFailures(nbFailures)); + ReplicationResponse.ShardInfo.Failure[] randomFailures = RandomObjects.randomShardInfoFailures(random(), nbFailures); + return new ReplicationResponse.ShardInfo(shardInfo.getTotal(), shardInfo.getSuccessful(), randomFailures); }); return randomFrom(mutations).get(); }; - checkEqualsAndHashCode(randomShardInfo(), copy, mutate); + checkEqualsAndHashCode(RandomObjects.randomShardInfo(random(), randomBoolean()), copy, mutate); } public void testFailureEqualsAndHashcode() { @@ -127,7 +129,7 @@ public class ReplicationResponseTests extends ESTestCase { return randomFrom(mutations).get(); }; - checkEqualsAndHashCode(randomFailure(), copy, mutate); + checkEqualsAndHashCode(RandomObjects.randomShardInfoFailure(random()), copy, mutate); } public void testShardInfoToXContent() throws IOException { @@ -136,11 +138,9 @@ public class ReplicationResponseTests extends ESTestCase { final ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3); final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType); - // Expected JSON is {"_shards":{"total":5,"successful":3,"failed":0}} + // Expected JSON is {"total":5,"successful":3,"failed":0} + assertThat(shardInfo, instanceOf(ToXContentObject.class)); try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) { - assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); - assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); - assertEquals("_shards", parser.currentName()); assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); assertEquals("total", parser.currentName()); @@ -155,7 +155,6 @@ public class ReplicationResponseTests extends ESTestCase { assertEquals(XContentParser.Token.VALUE_NUMBER, parser.nextToken()); assertEquals(shardInfo.getFailed(), parser.intValue()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); - assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); } } @@ -168,10 +167,9 @@ public class ReplicationResponseTests extends ESTestCase { ReplicationResponse.ShardInfo parsedShardInfo; try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) { - // Move to the start object that was manually added when building the object + // Move to the first start object assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); parsedShardInfo = ReplicationResponse.ShardInfo.fromXContent(parser); - assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); } // We can use assertEquals because the shardInfo doesn't have a failure (and exceptions) @@ -184,13 +182,10 @@ public class ReplicationResponseTests extends ESTestCase { public void testShardInfoWithFailureToXContent() throws IOException { final 
XContentType xContentType = randomFrom(XContentType.values()); - final ReplicationResponse.ShardInfo shardInfo = randomShardInfo(); + final ReplicationResponse.ShardInfo shardInfo = RandomObjects.randomShardInfo(random(), true); final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType); try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) { - assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); - assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); - assertEquals("_shards", parser.currentName()); assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); assertEquals("total", parser.currentName()); @@ -216,7 +211,6 @@ public class ReplicationResponseTests extends ESTestCase { assertEquals(XContentParser.Token.END_ARRAY, parser.nextToken()); } - assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); } @@ -225,15 +219,14 @@ public void testRandomShardInfoFromXContent() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); - final ReplicationResponse.ShardInfo shardInfo = randomShardInfo(); + final ReplicationResponse.ShardInfo shardInfo = RandomObjects.randomShardInfo(random(), randomBoolean()); final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType); ReplicationResponse.ShardInfo parsedShardInfo; try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) { - // Move to the start object that was manually added when building the object + // Move to the first start object assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); parsedShardInfo = ReplicationResponse.ShardInfo.fromXContent(parser); - assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); } @@ -266,7 +259,7 @@ public void testRandomFailureToXContent() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); - final ReplicationResponse.ShardInfo.Failure shardInfoFailure = randomFailure(); + final ReplicationResponse.ShardInfo.Failure shardInfoFailure = RandomObjects.randomShardInfoFailure(random()); + final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfoFailure, xContentType); try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) { @@ -277,7 +270,7 @@ public void testRandomFailureToAndFromXContent() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); - final ReplicationResponse.ShardInfo.Failure shardInfoFailure = randomFailure(); + final ReplicationResponse.ShardInfo.Failure shardInfoFailure = RandomObjects.randomShardInfoFailure(random()); + final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfoFailure, xContentType); ReplicationResponse.ShardInfo.Failure parsedFailure; @@ -358,32 +351,4 @@ public class ReplicationResponseTests extends ESTestCase { } assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); } - - private static ReplicationResponse.ShardInfo randomShardInfo() { - int total = randomIntBetween(1, 10); - int successful = randomIntBetween(0, total); - return new
ReplicationResponse.ShardInfo(total, successful, randomFailures(Math.max(0, (total - successful)))); - } - - private static ReplicationResponse.ShardInfo.Failure[] randomFailures(int nbFailures) { - List randomFailures = new ArrayList<>(nbFailures); - for (int i = 0; i < nbFailures; i++) { - randomFailures.add(randomFailure()); - } - return randomFailures.toArray(new ReplicationResponse.ShardInfo.Failure[nbFailures]); - } - - private static ReplicationResponse.ShardInfo.Failure randomFailure() { - return new ReplicationResponse.ShardInfo.Failure( - new ShardId(randomAsciiOfLength(5), randomAsciiOfLength(5), randomIntBetween(0, 5)), - randomAsciiOfLength(3), - randomFrom( - new IndexShardRecoveringException(new ShardId("_test", "_0", 5)), - new ElasticsearchException(new IllegalArgumentException("argument is wrong")), - new RoutingMissingException("_test", "_type", "_id") - ), - randomFrom(RestStatus.values()), - randomBoolean() - ); - } } diff --git a/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java index fff3b3cc3af..160c14c243c 100644 --- a/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java @@ -43,7 +43,7 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTestCase { Settings settings = HEADER_SETTINGS; Actions actions = new Actions(settings, threadPool, testedActions); NodeClient client = new NodeClient(settings, threadPool); - client.initialize(actions); + client.initialize(actions, () -> "test"); return client; } diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index ee6d719f55b..d63203eda25 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -375,7 +375,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { otherNodes.remove(master); NetworkDisruption partition = new NetworkDisruption( new TwoPartitions(Collections.singleton(master), otherNodes), - new NetworkDelay(TimeValue.timeValueMinutes(1))); + new NetworkDisruption.NetworkDisconnect()); internalCluster().setDisruptionScheme(partition); final CountDownLatch latch = new CountDownLatch(1); diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 2b8333700a3..1d7a65e3224 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -87,19 +87,19 @@ public class NodeConnectionsServiceTests extends ESTestCase { ClusterState current = clusterStateFromNodes(Collections.emptyList()); ClusterChangedEvent event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); - service.connectToNodes(event.nodesDelta().addedNodes()); - assertConnected(event.nodesDelta().addedNodes()); + service.connectToNodes(event.state().nodes()); + assertConnected(event.state().nodes()); - service.disconnectFromNodes(event.nodesDelta().removedNodes()); + service.disconnectFromNodesExcept(event.state().nodes()); assertConnectedExactlyToNodes(event.state()); current = event.state(); event = new ClusterChangedEvent("test", 
clusterStateFromNodes(randomSubsetOf(nodes)), current); - service.connectToNodes(event.nodesDelta().addedNodes()); - assertConnected(event.nodesDelta().addedNodes()); + service.connectToNodes(event.state().nodes()); + assertConnected(event.state().nodes()); - service.disconnectFromNodes(event.nodesDelta().removedNodes()); + service.disconnectFromNodesExcept(event.state().nodes()); assertConnectedExactlyToNodes(event.state()); } diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java index cc76fdf9dc7..df813fb4e16 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -25,12 +25,12 @@ import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -127,12 +127,12 @@ public class ClusterServiceTests extends ESTestCase { emptySet(), Version.CURRENT)); timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { @Override - public void connectToNodes(List addedNodes) { + public void connectToNodes(Iterable discoveryNodes) { // skip } @Override - public void disconnectFromNodes(List removedNodes) { + public void disconnectFromNodesExcept(Iterable nodesToKeep) { // skip } }); @@ -1058,17 +1058,18 @@ public class ClusterServiceTests extends ESTestCase { threadPool); timedClusterService.setLocalNode(new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)); - Set currentNodes = Collections.synchronizedSet(new HashSet<>()); - currentNodes.add(timedClusterService.localNode()); + Set currentNodes = new HashSet<>(); timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { @Override - public void connectToNodes(List addedNodes) { - currentNodes.addAll(addedNodes); + public void connectToNodes(Iterable discoveryNodes) { + discoveryNodes.forEach(currentNodes::add); } @Override - public void disconnectFromNodes(List removedNodes) { - currentNodes.removeAll(removedNodes); + public void disconnectFromNodesExcept(Iterable nodesToKeep) { + Set nodeSet = new HashSet<>(); + nodesToKeep.iterator().forEachRemaining(nodeSet::add); + currentNodes.removeIf(node -> nodeSet.contains(node) == false); } }); AtomicBoolean failToCommit = new AtomicBoolean(); diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 851ea26a19d..df1f82f7235 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -33,12 +33,15 @@ import 
org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; import java.util.function.Function; import static org.hamcrest.CoreMatchers.equalTo; @@ -126,6 +129,52 @@ public class ScopedSettingsTests extends ESTestCase { assertEquals(0, consumer2.get()); } + public void testAddConsumerAffix() { + Setting.AffixSetting<Integer> intSetting = Setting.affixKeySetting("foo.", "bar", + (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); + Setting.AffixSetting<List<Integer>> listSetting = Setting.affixKeySetting("foo.", "list", + (k) -> Setting.listSetting(k, Arrays.asList("1"), Integer::parseInt, Property.Dynamic, Property.NodeScope)); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(intSetting, listSetting))); + Map<String, List<Integer>> listResults = new HashMap<>(); + Map<String, Integer> intResults = new HashMap<>(); + + BiConsumer<String, Integer> intConsumer = intResults::put; + BiConsumer<String, List<Integer>> listConsumer = listResults::put; + + service.addAffixUpdateConsumer(listSetting, listConsumer, (s, k) -> {}); + service.addAffixUpdateConsumer(intSetting, intConsumer, (s, k) -> {}); + assertEquals(0, listResults.size()); + assertEquals(0, intResults.size()); + service.applySettings(Settings.builder() + .put("foo.test.bar", 2) + .put("foo.test_1.bar", 7) + .putArray("foo.test_list.list", "16", "17") + .putArray("foo.test_list_1.list", "18", "19", "20") + .build()); + assertEquals(2, intResults.get("test").intValue()); + assertEquals(7, intResults.get("test_1").intValue()); + assertEquals(Arrays.asList(16, 17), listResults.get("test_list")); + assertEquals(Arrays.asList(18, 19, 20), listResults.get("test_list_1")); + assertEquals(2, listResults.size()); + assertEquals(2, intResults.size()); + + listResults.clear(); + intResults.clear(); + + service.applySettings(Settings.builder() + .put("foo.test.bar", 2) + .put("foo.test_1.bar", 8) + .putArray("foo.test_list.list", "16", "17") + .putNull("foo.test_list_1.list") + .build()); + assertNull("test wasn't changed", intResults.get("test")); + assertEquals(8, intResults.get("test_1").intValue()); + assertNull("test_list wasn't changed", listResults.get("test_list")); + assertEquals(Arrays.asList(1), listResults.get("test_list_1")); // reset to default + assertEquals(1, listResults.size()); + assertEquals(1, intResults.size()); + } + public void testApply() { Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); @@ -214,7 +263,8 @@ public class ScopedSettingsTests extends ESTestCase { Setting<Integer> fooBarBaz = Setting.intSetting("foo.bar.baz", 1, Property.NodeScope); Setting<Integer> fooBar = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); Setting<Settings> someGroup = Setting.groupSetting("some.group.", Property.Dynamic, Property.NodeScope); - Setting<Boolean> someAffix = Setting.affixKeySetting("some.prefix.", "somekey", "true", Boolean::parseBoolean, Property.NodeScope); + Setting<Boolean> someAffix = Setting.affixKeySetting("some.prefix.", "somekey", (key) -> Setting.boolSetting(key, true, + Property.NodeScope)); Setting<List<String>> foorBarQuux = Setting.listSetting("foo.bar.quux", Arrays.asList("a", "b",
"c"), Function.identity(), Property.NodeScope); ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(fooBar, fooBarBaz, foorBarQuux, @@ -253,6 +303,65 @@ public class ScopedSettingsTests extends ESTestCase { assertThat(diff.getAsInt("foo.bar", null), equalTo(1)); } + public void testDiffWithAffixAndComplexMatcher() { + Setting fooBarBaz = Setting.intSetting("foo.bar.baz", 1, Property.NodeScope); + Setting fooBar = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting someGroup = Setting.groupSetting("some.group.", Property.Dynamic, Property.NodeScope); + Setting someAffix = Setting.affixKeySetting("some.prefix.", "somekey", (key) -> Setting.boolSetting(key, true, + Property.NodeScope)); + Setting> foorBarQuux = Setting.affixKeySetting("foo.", "quux", + (key) -> Setting.listSetting(key, Arrays.asList("a", "b", "c"), Function.identity(), Property.NodeScope)); + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(fooBar, fooBarBaz, foorBarQuux, + someGroup, someAffix))); + Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY); + assertEquals(1, diff.getAsMap().size()); + assertThat(diff.getAsInt("foo.bar.baz", null), equalTo(1)); + assertNull(diff.getAsArray("foo.bar.quux", null)); // affix settings don't know their concrete keys + + diff = settings.diff( + Settings.builder().put("foo.bar", 5).build(), + Settings.builder().put("foo.bar.baz", 17).putArray("foo.bar.quux", "d", "e", "f").build()); + assertEquals(4, diff.getAsMap().size()); + assertThat(diff.getAsInt("foo.bar.baz", null), equalTo(17)); + assertArrayEquals(diff.getAsArray("foo.bar.quux", null), new String[] {"d", "e", "f"}); + + diff = settings.diff( + Settings.builder().put("some.group.foo", 5).build(), + Settings.builder().put("some.group.foobar", 17, "some.group.foo", 25).build()); + assertEquals(3, diff.getAsMap().size()); + assertThat(diff.getAsInt("some.group.foobar", null), equalTo(17)); + assertNull(diff.get("some.group.foo")); + assertNull(diff.getAsArray("foo.bar.quux", null)); // affix settings don't know their concrete keys + assertThat(diff.getAsInt("foo.bar.baz", null), equalTo(1)); + assertThat(diff.getAsInt("foo.bar", null), equalTo(1)); + + diff = settings.diff( + Settings.builder().put("some.prefix.foo.somekey", 5).build(), + Settings.builder().put("some.prefix.foobar.somekey", 17, + "some.prefix.foo.somekey", 18).build()); + assertEquals(3, diff.getAsMap().size()); + assertThat(diff.getAsInt("some.prefix.foobar.somekey", null), equalTo(17)); + assertNull(diff.get("some.prefix.foo.somekey")); + assertNull(diff.getAsArray("foo.bar.quux", null)); // affix settings don't know their concrete keys + assertThat(diff.getAsInt("foo.bar.baz", null), equalTo(1)); + assertThat(diff.getAsInt("foo.bar", null), equalTo(1)); + + diff = settings.diff( + Settings.builder().put("some.prefix.foo.somekey", 5).build(), + Settings.builder().put("some.prefix.foobar.somekey", 17, + "some.prefix.foo.somekey", 18) + .putArray("foo.bar.quux", "x", "y", "z") + .putArray("foo.baz.quux", "d", "e", "f") + .build()); + assertEquals(9, diff.getAsMap().size()); + assertThat(diff.getAsInt("some.prefix.foobar.somekey", null), equalTo(17)); + assertNull(diff.get("some.prefix.foo.somekey")); + assertArrayEquals(diff.getAsArray("foo.bar.quux", null), new String[] {"x", "y", "z"}); + assertArrayEquals(diff.getAsArray("foo.baz.quux", null), new String[] {"d", "e", "f"}); + assertThat(diff.getAsInt("foo.bar.baz", 
null), equalTo(1)); + assertThat(diff.getAsInt("foo.bar", null), equalTo(1)); + } + public void testUpdateTracer() { ClusterSettings settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); AtomicReference> ref = new AtomicReference<>(); diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 2bd5dea3c10..4ce23ebcaf0 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -426,7 +426,7 @@ public class SettingTests extends ESTestCase { } public void testDynamicKeySetting() { - Setting setting = Setting.prefixKeySetting("foo.", "false", Boolean::parseBoolean, Property.NodeScope); + Setting setting = Setting.prefixKeySetting("foo.", (key) -> Setting.boolSetting(key, false, Property.NodeScope)); assertTrue(setting.hasComplexMatcher()); assertTrue(setting.match("foo.bar")); assertFalse(setting.match("foo")); @@ -444,11 +444,11 @@ public class SettingTests extends ESTestCase { public void testAffixKeySetting() { Setting setting = - Setting.affixKeySetting("foo.", "enable", "false", Boolean::parseBoolean, Property.NodeScope); + Setting.affixKeySetting("foo.", "enable", (key) -> Setting.boolSetting(key, false, Property.NodeScope)); assertTrue(setting.hasComplexMatcher()); assertTrue(setting.match("foo.bar.enable")); assertTrue(setting.match("foo.baz.enable")); - assertTrue(setting.match("foo.bar.baz.enable")); + assertFalse(setting.match("foo.bar.baz.enable")); assertFalse(setting.match("foo.bar")); assertFalse(setting.match("foo.bar.baz.enabled")); assertFalse(setting.match("foo")); @@ -459,11 +459,23 @@ public class SettingTests extends ESTestCase { IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> setting.getConcreteSetting("foo")); assertEquals("key [foo] must match [foo.*.enable] but didn't.", exc.getMessage()); - exc = expectThrows(IllegalArgumentException.class, () -> Setting.affixKeySetting("foo", "enable", "false", - Boolean::parseBoolean, Property.NodeScope)); + exc = expectThrows(IllegalArgumentException.class, () -> Setting.affixKeySetting("foo", "enable", + (key) -> Setting.boolSetting(key, false, Property.NodeScope))); assertEquals("prefix must end with a '.'", exc.getMessage()); + + Setting> listAffixSetting = Setting.affixKeySetting("foo.", "bar", + (key) -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope)); + + assertTrue(listAffixSetting.hasComplexMatcher()); + assertTrue(listAffixSetting.match("foo.test.bar")); + assertTrue(listAffixSetting.match("foo.test_1.bar")); + assertFalse(listAffixSetting.match("foo.buzz.baz.bar")); + assertFalse(listAffixSetting.match("foo.bar")); + assertFalse(listAffixSetting.match("foo.baz")); + assertFalse(listAffixSetting.match("foo")); } + public void testMinMaxInt() { Setting integerSetting = Setting.intSetting("foo.bar", 1, 0, 10, Property.NodeScope); try { @@ -530,4 +542,5 @@ public class SettingTests extends ESTestCase { assertThat(setting.get(Settings.builder().put("foo", "12h").build()), equalTo(TimeValue.timeValueHours(12))); assertThat(setting.get(Settings.EMPTY).getMillis(), equalTo(random.getMillis() * factor)); } + } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index af4b2b826f5..8b5ebe71e95 
100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -19,19 +19,6 @@ package org.elasticsearch.discovery.zen; -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.EnumSet; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; @@ -52,7 +39,6 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.zen.PublishClusterStateActionTests.AssertingAckListener; @@ -67,6 +53,20 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseOptions; import org.elasticsearch.transport.TransportService; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE; @@ -173,38 +173,38 @@ public class ZenDiscoveryUnitTests extends ESTestCase { Settings settings = Settings.builder() .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes)).build(); - ArrayList toClose = new ArrayList<>(); + ArrayDeque toClose = new ArrayDeque<>(); try { Set expectedFDNodes = null; final MockTransportService masterTransport = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null); masterTransport.start(); DiscoveryNode masterNode = new DiscoveryNode("master", masterTransport.boundAddress().publishAddress(), Version.CURRENT); - toClose.add(masterTransport); + toClose.addFirst(masterTransport); masterTransport.setLocalNode(masterNode); ClusterState state = ClusterStateCreationUtils.state(masterNode, masterNode, masterNode); // build the zen discovery and cluster service ClusterService masterClusterService = createClusterService(threadPool, masterNode); - toClose.add(masterClusterService); + toClose.addFirst(masterClusterService); // TODO: clustername shouldn't be stored twice in cluster service, but for now, work around it state = ClusterState.builder(masterClusterService.getClusterName()).nodes(state.nodes()).build(); setState(masterClusterService, state); ZenDiscovery masterZen = buildZenDiscovery(settings, masterTransport, masterClusterService, threadPool); - toClose.add(masterZen); + toClose.addFirst(masterZen); masterTransport.acceptIncomingRequests(); final MockTransportService otherTransport = 
MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null); otherTransport.start(); - toClose.add(otherTransport); + toClose.addFirst(otherTransport); DiscoveryNode otherNode = new DiscoveryNode("other", otherTransport.boundAddress().publishAddress(), Version.CURRENT); otherTransport.setLocalNode(otherNode); final ClusterState otherState = ClusterState.builder(masterClusterService.getClusterName()) .nodes(DiscoveryNodes.builder().add(otherNode).localNodeId(otherNode.getId())).build(); ClusterService otherClusterService = createClusterService(threadPool, masterNode); - toClose.add(otherClusterService); + toClose.addFirst(otherClusterService); setState(otherClusterService, otherState); ZenDiscovery otherZen = buildZenDiscovery(settings, otherTransport, otherClusterService, threadPool); - toClose.add(otherZen); + toClose.addFirst(otherZen); otherTransport.acceptIncomingRequests(); masterTransport.connectToNode(otherNode); @@ -244,21 +244,21 @@ public class ZenDiscoveryUnitTests extends ESTestCase { Settings settings = Settings.builder() .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes)).build(); - ArrayList toClose = new ArrayList<>(); + ArrayDeque toClose = new ArrayDeque<>(); try { final MockTransportService masterTransport = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null); masterTransport.start(); DiscoveryNode masterNode = new DiscoveryNode("master", masterTransport.boundAddress().publishAddress(), Version.CURRENT); - toClose.add(masterTransport); + toClose.addFirst(masterTransport); masterTransport.setLocalNode(masterNode); ClusterState state = ClusterStateCreationUtils.state(masterNode, null, masterNode); // build the zen discovery and cluster service ClusterService masterClusterService = createClusterService(threadPool, masterNode); - toClose.add(masterClusterService); + toClose.addFirst(masterClusterService); state = ClusterState.builder(masterClusterService.getClusterName()).nodes(state.nodes()).build(); setState(masterClusterService, state); ZenDiscovery masterZen = buildZenDiscovery(settings, masterTransport, masterClusterService, threadPool); - toClose.add(masterZen); + toClose.addFirst(masterZen); masterTransport.acceptIncomingRequests(); // inject a pending cluster state diff --git a/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index a84f78ca3d9..9161bc413c8 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -22,11 +22,13 @@ package org.elasticsearch.index.engine; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.RamUsageTester; import org.apache.lucene.util.TestUtil; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.test.ESTestCase; public class LiveVersionMapTests extends ESTestCase { public void testRamBytesUsed() throws Exception { + assumeTrue("Test disabled for JDK 9", JavaVersion.current().compareTo(JavaVersion.parse("9")) < 0); LiveVersionMap map = new LiveVersionMap(); for (int i = 0; i < 100000; ++i) { BytesRefBuilder uid = new BytesRefBuilder(); diff --git a/core/src/test/java/org/elasticsearch/index/get/GetFieldTests.java b/core/src/test/java/org/elasticsearch/index/get/GetFieldTests.java index 0425f09a402..6ce3d802765 100644 --- 
a/core/src/test/java/org/elasticsearch/index/get/GetFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/get/GetFieldTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -43,7 +42,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXC public class GetFieldTests extends ESTestCase { - public void testToXContent() throws IOException { + public void testToXContent() { GetField getField = new GetField("field", Arrays.asList("value1", "value2")); String output = Strings.toString(getField); assertEquals("{\"field\":[\"value1\",\"value2\"]}", output); diff --git a/core/src/test/java/org/elasticsearch/index/get/GetResultTests.java b/core/src/test/java/org/elasticsearch/index/get/GetResultTests.java index 0d0008cd1ce..b0e3b87d37d 100644 --- a/core/src/test/java/org/elasticsearch/index/get/GetResultTests.java +++ b/core/src/test/java/org/elasticsearch/index/get/GetResultTests.java @@ -83,7 +83,7 @@ public class GetResultTests extends ESTestCase { XContentType xContentType = randomFrom(XContentType.values()); Tuple tuple = randomGetResult(xContentType); GetResult getResult = tuple.v1(); - if (getResult.isExists()) { + if (getResult.isExists() && getResult.isSourceEmpty() == false) { assertNotNull(getResult.sourceRef()); } else { assertNull(getResult.sourceRef()); diff --git a/core/src/test/java/org/elasticsearch/index/search/MatchQueryIT.java b/core/src/test/java/org/elasticsearch/index/search/MatchQueryIT.java index 0cd185bc03a..2381b8bdc38 100644 --- a/core/src/test/java/org/elasticsearch/index/search/MatchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/index/search/MatchQueryIT.java @@ -65,7 +65,9 @@ public class MatchQueryIT extends ESIntegTestCase { assertAcked(builder.addMapping(INDEX, createMapping())); ensureGreen(); + } + private List getDocs() { List builders = new ArrayList<>(); builders.add(client().prepareIndex("test", "test", "1").setSource("field", "say wtf happened foo")); builders.add(client().prepareIndex("test", "test", "2").setSource("field", "bar baz what the fudge man")); @@ -74,7 +76,7 @@ public class MatchQueryIT extends ESIntegTestCase { builders.add(client().prepareIndex("test", "test", "5").setSource("field", "bar two three")); builders.add(client().prepareIndex("test", "test", "6").setSource("field", "bar baz two three")); - indexRandom(true, false, builders); + return builders; } /** @@ -97,6 +99,8 @@ public class MatchQueryIT extends ESIntegTestCase { } public void testSimpleMultiTermPhrase() throws ExecutionException, InterruptedException { + indexRandom(true, false, getDocs()); + // first search using regular synonym field using phrase SearchResponse searchResponse = client().prepareSearch(INDEX) .setQuery(QueryBuilders.matchPhraseQuery("field", "foo two three").analyzer("lower_syns")).get(); @@ -115,6 +119,8 @@ public class MatchQueryIT extends ESIntegTestCase { } public void testSimpleMultiTermAnd() throws ExecutionException, InterruptedException { + indexRandom(true, false, getDocs()); + // first search using regular synonym field using phrase SearchResponse searchResponse = client().prepareSearch(INDEX).setQuery(QueryBuilders.matchQuery("field", "say what the fudge") .operator(Operator.AND).analyzer("lower_syns")).get(); @@ -132,6 +138,8 @@ public class MatchQueryIT extends 
ESIntegTestCase { } public void testMinShouldMatch() throws ExecutionException, InterruptedException { + indexRandom(true, false, getDocs()); + // no min should match SearchResponse searchResponse = client().prepareSearch(INDEX).setQuery(QueryBuilders.matchQuery("field", "three what the fudge foo") .operator(Operator.OR).analyzer("lower_graphsyns")).get(); @@ -150,4 +158,46 @@ assertHitCount(searchResponse, 3L); assertSearchHits(searchResponse, "1", "2", "6"); } + + public void testPhrasePrefix() throws ExecutionException, InterruptedException { + List builders = getDocs(); + builders.add(client().prepareIndex("test", "test", "7").setSource("field", "WTFD!")); + builders.add(client().prepareIndex("test", "test", "8").setSource("field", "Weird Al's WHAT THE FUDGESICLE")); + indexRandom(true, false, builders); + + SearchResponse searchResponse = client().prepareSearch(INDEX).setQuery(QueryBuilders.matchPhrasePrefixQuery("field", "wtf") + .analyzer("lower_graphsyns")).get(); + + assertHitCount(searchResponse, 5L); + assertSearchHits(searchResponse, "1", "2", "3", "7", "8"); + } + + public void testCommonTerms() throws ExecutionException, InterruptedException { + String route = "commonTermsTest"; + List builders = getDocs(); + for (IndexRequestBuilder indexRequest : builders) { + // route all docs to same shard for this test + indexRequest.setRouting(route); + } + indexRandom(true, false, builders); + + // do a search with no cutoff frequency to show which docs should match + SearchResponse searchResponse = client().prepareSearch(INDEX) + .setRouting(route) + .setQuery(QueryBuilders.matchQuery("field", "foo three happened") + .operator(Operator.OR).analyzer("lower_graphsyns")).get(); + + assertHitCount(searchResponse, 4L); + assertSearchHits(searchResponse, "1", "2", "5", "6"); + + // do same search with cutoff and see fewer documents match + // in this case, essentially everything but "happened" gets excluded + searchResponse = client().prepareSearch(INDEX) + .setRouting(route) + .setQuery(QueryBuilders.matchQuery("field", "foo three happened") + .operator(Operator.OR).analyzer("lower_graphsyns").cutoffFrequency(1f)).get(); + + assertHitCount(searchResponse, 1L); + assertSearchHits(searchResponse, "1"); + } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardIdTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardIdTests.java new file mode 100644 index 00000000000..dd05c10af96 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/shard/ShardIdTests.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.index.shard; + +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.test.ESTestCase; + +public class ShardIdTests extends ESTestCase { + + public void testShardIdFromString() { + String indexName = randomAsciiOfLengthBetween(3,50); + int shardId = randomInt(); + ShardId id = ShardId.fromString("["+indexName+"]["+shardId+"]"); + assertEquals(indexName, id.getIndexName()); + assertEquals(shardId, id.getId()); + assertEquals(indexName, id.getIndex().getName()); + assertEquals(IndexMetaData.INDEX_UUID_NA_VALUE, id.getIndex().getUUID()); + + id = ShardId.fromString("[some]weird[0]Name][-125]"); + assertEquals("some]weird[0]Name", id.getIndexName()); + assertEquals(-125, id.getId()); + assertEquals("some]weird[0]Name", id.getIndex().getName()); + assertEquals(IndexMetaData.INDEX_UUID_NA_VALUE, id.getIndex().getUUID()); + + String badId = indexName + "," + shardId; // missing separator + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> ShardId.fromString(badId)); + assertEquals("Unexpected shardId string format, expected [indexName][shardId] but got " + badId, ex.getMessage()); + + String badId2 = indexName + "][" + shardId + "]"; // missing opening bracket + ex = expectThrows(IllegalArgumentException.class, + () -> ShardId.fromString(badId2)); + + String badId3 = "[" + indexName + "][" + shardId; // missing closing bracket + ex = expectThrows(IllegalArgumentException.class, + () -> ShardId.fromString(badId3)); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index a2a6620a6c2..a3e3f611b21 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -23,6 +23,9 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.Term; import org.apache.lucene.mockfile.FilterFileChannel; import org.apache.lucene.store.AlreadyClosedException; @@ -31,6 +34,7 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -47,6 +51,12 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.Engine.Operation.Origin; +import org.elasticsearch.index.mapper.ParseContext.Document; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog.Location; @@ -67,6 
+77,7 @@ import java.nio.file.InvalidPathException; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -297,14 +308,14 @@ public class TranslogTests extends ESTestCase { { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(2L)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(125L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(139L)); } translog.add(new Translog.Delete(newUid("3"))); { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(3L)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(153L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(181L)); } final long seqNo = 1; @@ -313,10 +324,10 @@ public class TranslogTests extends ESTestCase { { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(4L)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(195L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(223L)); } - final long expectedSizeInBytes = 238L; + final long expectedSizeInBytes = 266L; translog.prepareCommit(); { final TranslogStats stats = stats(); @@ -1993,4 +2004,47 @@ public class TranslogTests extends ESTestCase { public static Translog.Location randomTranslogLocation() { return new Translog.Location(randomLong(), randomLong(), randomInt()); } + + public void testTranslogOpSerialization() throws Exception { + BytesReference B_1 = new BytesArray(new byte[]{1}); + SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID(); + assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers"; + long randomSeqNum = randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong(); + long randomPrimaryTerm = randomBoolean() ? 
0 : randomNonNegativeLong(); + seqID.seqNo.setLongValue(randomSeqNum); + seqID.seqNoDocValue.setLongValue(randomSeqNum); + seqID.primaryTerm.setLongValue(randomPrimaryTerm); + Field uidField = new Field("_uid", "1", UidFieldMapper.Defaults.FIELD_TYPE); + Field versionField = new NumericDocValuesField("_version", 1); + Document document = new Document(); + document.add(new TextField("value", "test", Field.Store.YES)); + document.add(uidField); + document.add(versionField); + document.add(seqID.seqNo); + document.add(seqID.seqNoDocValue); + document.add(seqID.primaryTerm); + ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null, Arrays.asList(document), B_1, null); + + Engine.Index eIndex = new Engine.Index(newUid("1"), doc, randomSeqNum, randomPrimaryTerm, + 1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false); + Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true); + Translog.Index index = new Translog.Index(eIndex, eIndexResult); + + BytesStreamOutput out = new BytesStreamOutput(); + index.writeTo(out); + StreamInput in = out.bytes().streamInput(); + Translog.Index serializedIndex = new Translog.Index(in); + assertEquals(index, serializedIndex); + + Engine.Delete eDelete = new Engine.Delete("type", "1", newUid("1"), randomSeqNum, randomPrimaryTerm, + 2, VersionType.INTERNAL, Origin.PRIMARY, 0); + Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true); + Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult); + + out = new BytesStreamOutput(); + delete.writeTo(out); + in = out.bytes().streamInput(); + Translog.Delete serializedDelete = new Translog.Delete(in); + assertEquals(delete, serializedDelete); + } } diff --git a/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java index 87c1fe66044..0163c98692e 100644 --- a/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java @@ -53,7 +53,6 @@ import java.util.function.Supplier; public abstract class AbstractSearchTestCase extends ESTestCase { protected NamedWriteableRegistry namedWriteableRegistry; - protected SearchRequestParsers searchRequestParsers; private TestSearchExtPlugin searchExtPlugin; private NamedXContentRegistry xContentRegistry; @@ -67,7 +66,6 @@ public abstract class AbstractSearchTestCase extends ESTestCase { entries.addAll(searchModule.getNamedWriteables()); namedWriteableRegistry = new NamedWriteableRegistry(entries); xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents()); - searchRequestParsers = searchModule.getSearchRequestParsers(); } @Override diff --git a/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java index a56886a8bf0..2512dbdfeac 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -318,7 +318,7 @@ public class SearchModuleTests extends ModuleTestCase { } @Override - public String getWriteableName() { + public String getType() { return "test"; } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 6f5a54965e3..91e8566e319 100644 --- 
a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; @@ -27,11 +28,21 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.support.ValueType; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + public class TermsAggregatorTests extends AggregatorTestCase { public void testTermsAggregator() throws Exception { @@ -83,4 +94,108 @@ public class TermsAggregatorTests extends AggregatorTestCase { directory.close(); } + public void testMixLongAndDouble() throws IOException { + for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) { + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.LONG) + .executionHint(executionMode.toString()) + .field("number") + .order(Terms.Order.term(true)); + List aggs = new ArrayList<> (); + int numLongs = randomIntBetween(1, 3); + for (int i = 0; i < numLongs; i++) { + final Directory dir; + try (IndexReader reader = createIndexWithLongs()) { + dir = ((DirectoryReader) reader).directory(); + IndexSearcher searcher = new IndexSearcher(reader); + MappedFieldType fieldType = + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + fieldType.setName("number"); + fieldType.setHasDocValues(true); + aggs.add(buildInternalAggregation(aggregationBuilder, fieldType, searcher)); + } + dir.close(); + } + int numDoubles = randomIntBetween(1, 3); + for (int i = 0; i < numDoubles; i++) { + final Directory dir; + try (IndexReader reader = createIndexWithDoubles()) { + dir = ((DirectoryReader) reader).directory(); + IndexSearcher searcher = new IndexSearcher(reader); + MappedFieldType fieldType = + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + fieldType.setName("number"); + fieldType.setHasDocValues(true); + aggs.add(buildInternalAggregation(aggregationBuilder, fieldType, searcher)); + } + dir.close(); + } + InternalAggregation.ReduceContext ctx = + new InternalAggregation.ReduceContext(new MockBigArrays(Settings.EMPTY, + new NoneCircuitBreakerService()), null); + for (InternalAggregation internalAgg : aggs) { + InternalAggregation mergedAggs = internalAgg.doReduce(aggs, ctx); + assertTrue(mergedAggs instanceof DoubleTerms); + long expected = numLongs + numDoubles; + List buckets = ((DoubleTerms) mergedAggs).getBuckets(); + assertEquals(4, 
buckets.size()); + assertEquals("1.0", buckets.get(0).getKeyAsString()); + assertEquals(expected, buckets.get(0).getDocCount()); + assertEquals("10.0", buckets.get(1).getKeyAsString()); + assertEquals(expected * 2, buckets.get(1).getDocCount()); + assertEquals("100.0", buckets.get(2).getKeyAsString()); + assertEquals(expected * 2, buckets.get(2).getDocCount()); + assertEquals("1000.0", buckets.get(3).getKeyAsString()); + assertEquals(expected, buckets.get(3).getDocCount()); + } + } + } + + private IndexReader createIndexWithLongs() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + Document document = new Document(); + document.add(new SortedNumericDocValuesField("number", 10)); + document.add(new SortedNumericDocValuesField("number", 100)); + indexWriter.addDocument(document); + document = new Document(); + document.add(new SortedNumericDocValuesField("number", 1)); + document.add(new SortedNumericDocValuesField("number", 100)); + indexWriter.addDocument(document); + document = new Document(); + document.add(new SortedNumericDocValuesField("number", 10)); + document.add(new SortedNumericDocValuesField("number", 1000)); + indexWriter.addDocument(document); + indexWriter.close(); + return DirectoryReader.open(directory); + } + + private IndexReader createIndexWithDoubles() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + Document document = new Document(); + document.add(new SortedNumericDocValuesField("number", NumericUtils.doubleToSortableLong(10.0d))); + document.add(new SortedNumericDocValuesField("number", NumericUtils.doubleToSortableLong(100.0d))); + indexWriter.addDocument(document); + document = new Document(); + document.add(new SortedNumericDocValuesField("number", NumericUtils.doubleToSortableLong(1.0d))); + document.add(new SortedNumericDocValuesField("number", NumericUtils.doubleToSortableLong(100.0d))); + indexWriter.addDocument(document); + document = new Document(); + document.add(new SortedNumericDocValuesField("number", NumericUtils.doubleToSortableLong(10.0d))); + document.add(new SortedNumericDocValuesField("number", NumericUtils.doubleToSortableLong(1000.0d))); + indexWriter.addDocument(document); + indexWriter.close(); + return DirectoryReader.open(directory); + } + + private InternalAggregation buildInternalAggregation(TermsAggregationBuilder builder, MappedFieldType fieldType, + IndexSearcher searcher) throws IOException { + try (TermsAggregator aggregator = createAggregator(builder, fieldType, searcher)) { + aggregator.preCollection(); + searcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + return aggregator.buildAggregation(0L); + } + } + } diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightFieldTests.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightFieldTests.java index 6580be52555..027ae53a23d 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightFieldTests.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightFieldTests.java @@ -38,7 +38,7 @@ import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashC public class HighlightFieldTests extends ESTestCase { public static HighlightField createTestItem() { - String name = frequently() ? 
randomAsciiOfLengthBetween(1, 20) : randomRealisticUnicodeOfCodepointLengthBetween(1, 20); + String name = frequently() ? randomAsciiOfLengthBetween(5, 20) : randomRealisticUnicodeOfCodepointLengthBetween(5, 20); Text[] fragments = null; if (frequently()) { int size = randomIntBetween(0, 5); @@ -63,6 +63,7 @@ public class HighlightFieldTests extends ESTestCase { builder.endObject(); XContentParser parser = createParser(builder); parser.nextToken(); // skip to the opening object token, fromXContent advances from here and starts with the field name + parser.nextToken(); HighlightField parsedField = HighlightField.fromXContent(parser); assertEquals(highlightField, parsedField); if (highlightField.fragments() != null) { diff --git a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java index 2cb425d5274..f42b79670c1 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java @@ -19,48 +19,168 @@ package org.elasticsearch.search.internal; +import org.apache.lucene.search.Explanation; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightFieldTests; +import org.elasticsearch.search.internal.InternalSearchHit.InternalNestedIdentity; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.RandomObjects; +import java.io.IOException; import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Set; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class InternalSearchHitTests extends ESTestCase { + private static Set META_FIELDS = Sets.newHashSet("_uid", "_all", "_parent", "_routing", "_size", "_timestamp", "_ttl"); + + public static InternalSearchHit createTestItem(boolean withOptionalInnerHits) { + int internalId = randomInt(); + String uid = randomAsciiOfLength(10); + Text type = new Text(randomAsciiOfLengthBetween(5, 10)); + InternalNestedIdentity nestedIdentity = null; + if (randomBoolean()) { + nestedIdentity = InternalNestedIdentityTests.createTestItem(randomIntBetween(0, 2)); + } + Map fields = new HashMap<>(); + if (randomBoolean()) { + int size = 
randomIntBetween(0, 10); + for (int i = 0; i < size; i++) { + Tuple, List> values = RandomObjects.randomStoredFieldValues(random(), + XContentType.JSON); + if (randomBoolean()) { + String metaField = randomFrom(META_FIELDS); + fields.put(metaField, new InternalSearchHitField(metaField, values.v1())); + } else { + String fieldName = randomAsciiOfLengthBetween(5, 10); + fields.put(fieldName, new InternalSearchHitField(fieldName, values.v1())); + } + } + } + InternalSearchHit hit = new InternalSearchHit(internalId, uid, type, nestedIdentity, fields); + if (frequently()) { + if (rarely()) { + hit.score(Float.NaN); + } else { + hit.score(randomFloat()); + } + } + if (frequently()) { + hit.sourceRef(RandomObjects.randomSource(random())); + } + if (randomBoolean()) { + hit.version(randomLong()); + } + if (randomBoolean()) { + hit.sortValues(SearchSortValuesTests.createTestItem()); + } + if (randomBoolean()) { + int size = randomIntBetween(0, 5); + Map highlightFields = new HashMap<>(size); + for (int i = 0; i < size; i++) { + highlightFields.put(randomAsciiOfLength(5), HighlightFieldTests.createTestItem()); + } + hit.highlightFields(highlightFields); + } + if (randomBoolean()) { + int size = randomIntBetween(0, 5); + String[] matchedQueries = new String[size]; + for (int i = 0; i < size; i++) { + matchedQueries[i] = randomAsciiOfLength(5); + } + hit.matchedQueries(matchedQueries); + } + if (randomBoolean()) { + hit.explanation(createExplanation(randomIntBetween(0, 5))); + } + if (withOptionalInnerHits) { + int innerHitsSize = randomIntBetween(0, 3); + Map innerHits = new HashMap<>(innerHitsSize); + for (int i = 0; i < innerHitsSize; i++) { + innerHits.put(randomAsciiOfLength(5), InternalSearchHitsTests.createTestItem()); + } + hit.setInnerHits(innerHits); + } + if (randomBoolean()) { + hit.shard(new SearchShardTarget(randomAsciiOfLengthBetween(5, 10), + new ShardId(new Index(randomAsciiOfLengthBetween(5, 10), randomAsciiOfLengthBetween(5, 10)), randomInt()))); + } + return hit; + } + + public void testFromXContent() throws IOException { + InternalSearchHit searchHit = createTestItem(true); + XContentType xcontentType = randomFrom(XContentType.values()); + XContentBuilder builder = XContentFactory.contentBuilder(xcontentType); + builder = searchHit.toXContent(builder, ToXContent.EMPTY_PARAMS); + + XContentParser parser = createParser(builder); + parser.nextToken(); // jump to first START_OBJECT + InternalSearchHit parsed = InternalSearchHit.fromXContent(parser); + assertToXContentEquivalent(builder.bytes(), toXContent(parsed, xcontentType), xcontentType); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + assertNull(parser.nextToken()); + } + + public void testToXContent() throws IOException { + InternalSearchHit internalSearchHit = new InternalSearchHit(1, "id1", new Text("type"), Collections.emptyMap()); + internalSearchHit.score(1.5f); + XContentBuilder builder = JsonXContent.contentBuilder(); + internalSearchHit.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals("{\"_type\":\"type\",\"_id\":\"id1\",\"_score\":1.5}", builder.string()); + } + public void testSerializeShardTarget() throws Exception { SearchShardTarget target = new SearchShardTarget("_node_id", new Index("_index", "_na_"), 0); Map innerHits = new HashMap<>(); InternalSearchHit innerHit1 = new InternalSearchHit(0, "_id", new Text("_type"), null); - innerHit1.shardTarget(target); + innerHit1.shard(target); InternalSearchHit innerInnerHit2 = new InternalSearchHit(0, "_id", new Text("_type"), null); - 
innerInnerHit2.shardTarget(target); + innerInnerHit2.shard(target); innerHits.put("1", new InternalSearchHits(new InternalSearchHit[]{innerInnerHit2}, 1, 1f)); innerHit1.setInnerHits(innerHits); InternalSearchHit innerHit2 = new InternalSearchHit(0, "_id", new Text("_type"), null); - innerHit2.shardTarget(target); + innerHit2.shard(target); InternalSearchHit innerHit3 = new InternalSearchHit(0, "_id", new Text("_type"), null); - innerHit3.shardTarget(target); + innerHit3.shard(target); innerHits = new HashMap<>(); InternalSearchHit hit1 = new InternalSearchHit(0, "_id", new Text("_type"), null); innerHits.put("1", new InternalSearchHits(new InternalSearchHit[]{innerHit1, innerHit2}, 1, 1f)); innerHits.put("2", new InternalSearchHits(new InternalSearchHit[]{innerHit3}, 1, 1f)); - hit1.shardTarget(target); + hit1.shard(target); hit1.setInnerHits(innerHits); InternalSearchHit hit2 = new InternalSearchHit(0, "_id", new Text("_type"), null); - hit2.shardTarget(target); + hit2.shard(target); InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[]{hit1, hit2}, 2, 1f); @@ -94,4 +214,17 @@ public class InternalSearchHitTests extends ESTestCase { searchHit.sourceRef(new BytesArray("{}")); assertTrue(searchHit.hasSource()); } + + private static Explanation createExplanation(int depth) { + String description = randomAsciiOfLengthBetween(5, 20); + float value = randomFloat(); + List details = new ArrayList<>(); + if (depth > 0) { + int numberOfDetails = randomIntBetween(1, 3); + for (int i = 0; i < numberOfDetails; i++) { + details.add(createExplanation(depth - 1)); + } + } + return Explanation.match(value, description, details); + } } diff --git a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitsTests.java b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitsTests.java new file mode 100644 index 00000000000..5301c566d14 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitsTests.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.internal; + +import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; + +public class InternalSearchHitsTests extends ESTestCase { + + public static InternalSearchHits createTestItem() { + int searchHits = randomIntBetween(0, 5); + InternalSearchHit[] hits = new InternalSearchHit[searchHits]; + for (int i = 0; i < searchHits; i++) { + hits[i] = InternalSearchHitTests.createTestItem(false); // creating random innerHits could create loops + } + long totalHits = randomLong(); + float maxScore = frequently() ? randomFloat() : Float.NaN; + return new InternalSearchHits(hits, totalHits, maxScore); + } + + public void testFromXContent() throws IOException { + InternalSearchHits searchHits = createTestItem(); + XContentType xcontentType = XContentType.JSON; //randomFrom(XContentType.values()); + XContentBuilder builder = XContentFactory.contentBuilder(xcontentType); + builder.startObject(); + builder = searchHits.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + XContentParser parser = createParser(builder); + InternalSearchHits parsed = InternalSearchHits.fromXContent(parser); + assertToXContentEquivalent(builder.bytes(), toXContent(parsed, xcontentType), xcontentType); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + parser.nextToken(); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + assertNull(parser.nextToken()); + } + + public void testToXContent() throws IOException { + InternalSearchHit[] hits = new InternalSearchHit[] { + new InternalSearchHit(1, "id1", new Text("type"), Collections.emptyMap()), + new InternalSearchHit(2, "id2", new Text("type"), Collections.emptyMap()) }; + + long totalHits = 1000; + float maxScore = 1.5f; + InternalSearchHits searchHits = new InternalSearchHits(hits, totalHits, maxScore); + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + searchHits.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + assertEquals("{\"hits\":{\"total\":1000,\"max_score\":1.5," + + "\"hits\":[{\"_type\":\"type\",\"_id\":\"id1\",\"_score\":\"-Infinity\"},"+ + "{\"_type\":\"type\",\"_id\":\"id2\",\"_score\":\"-Infinity\"}]}}", builder.string()); + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/internal/SearchSortValuesTests.java b/core/src/test/java/org/elasticsearch/search/internal/SearchSortValuesTests.java index b53db0a09de..ddff230b4bc 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/SearchSortValuesTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/SearchSortValuesTests.java @@ -54,7 +54,7 @@ public class SearchSortValuesTests extends ESTestCase { valueSuppliers.add(() -> randomBoolean()); valueSuppliers.add(() -> frequently() ? 
randomAsciiOfLengthBetween(1, 30) : randomRealisticUnicodeOfCodepointLength(30)); - int size = randomInt(20); + int size = randomIntBetween(1, 20); Object[] values = new Object[size]; for (int i = 0; i < size; i++) { Supplier supplier = randomFrom(valueSuppliers); @@ -75,7 +75,8 @@ public class SearchSortValuesTests extends ESTestCase { builder.endObject(); XContentParser parser = createParser(builder); - parser.nextToken(); // skip to the elements field name token, fromXContent advances from there if called from ourside + parser.nextToken(); // skip to the elements start array token, fromXContent advances from there if called + parser.nextToken(); parser.nextToken(); if (sortValues.sortValues().length > 0) { SearchSortValues parsed = SearchSortValues.fromXContent(parser); diff --git a/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceIT.java b/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceIT.java index 3758715cbac..2914755ed73 100644 --- a/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceIT.java +++ b/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceIT.java @@ -90,9 +90,9 @@ public class SearchPreferenceIT extends ESIntegTestCase { final Client client = internalCluster().smartClient(); SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet(); - String firstNodeId = searchResponse.getHits().getAt(0).shard().nodeId(); + String firstNodeId = searchResponse.getHits().getAt(0).shard().getNodeId(); searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet(); - String secondNodeId = searchResponse.getHits().getAt(0).shard().nodeId(); + String secondNodeId = searchResponse.getHits().getAt(0).shard().getNodeId(); assertThat(firstNodeId, not(equalTo(secondNodeId))); } @@ -220,7 +220,7 @@ public class SearchPreferenceIT extends ESIntegTestCase { for (int i = 0; i < 2; i++) { SearchResponse searchResponse = request.execute().actionGet(); assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); - hitNodes.add(searchResponse.getHits().getAt(0).shard().nodeId()); + hitNodes.add(searchResponse.getHits().getAt(0).shard().getNodeId()); } assertThat(hitNodes.size(), greaterThan(1)); } diff --git a/distribution/src/main/resources/config/elasticsearch.yml b/distribution/src/main/resources/config/elasticsearch.yml index bf806535c9b..15e841fe390 100644 --- a/distribution/src/main/resources/config/elasticsearch.yml +++ b/distribution/src/main/resources/config/elasticsearch.yml @@ -7,8 +7,8 @@ # The primary way of configuring a node is via this file. This template lists # the most important settings you may want to configure for a production cluster. # -# Please see the documentation for further information on configuration options: -# +# Please consult the documentation for further information on configuration options: +# https://www.elastic.co/guide/en/elasticsearch/reference/index.html # # ---------------------------------- Cluster ----------------------------------- # @@ -58,8 +58,7 @@ # #http.port: 9200 # -# For more information, see the documentation at: -# +# For more information, consult the network module documentation. 
# # --------------------------------- Discovery ---------------------------------- # @@ -68,12 +67,11 @@ # #discovery.zen.ping.unicast.hosts: ["host1", "host2"] # -# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): +# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1): # #discovery.zen.minimum_master_nodes: 3 # -# For more information, see the documentation at: -# +# For more information, consult the zen discovery module documentation. # # ---------------------------------- Gateway ----------------------------------- # @@ -81,8 +79,7 @@ # #gateway.recover_after_nodes: 3 # -# For more information, see the documentation at: -# +# For more information, consult the gateway module documentation. # # ---------------------------------- Various ----------------------------------- # diff --git a/distribution/src/main/resources/config/jvm.options b/distribution/src/main/resources/config/jvm.options index 884b43a6091..ce94563c2b8 100644 --- a/distribution/src/main/resources/config/jvm.options +++ b/distribution/src/main/resources/config/jvm.options @@ -69,7 +69,6 @@ -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 --Dio.netty.allocator.type=unpooled # log4j 2 -Dlog4j.shutdownHookEnabled=false diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index 684f4aab81e..0b028c1a940 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -794,3 +794,10 @@ had a value. -------------------------------------------------- <1> Documents without a value in the `tags` field will fall into the same bucket as documents that have the value `N/A`. + +==== Mixing field types + +WARNING: When aggregating on multiple indices, the type of the aggregated field may not be the same in all indices. +Some types are compatible with each other (`integer` and `long` or `float` and `double`) but when the types are a mix +of decimal and non-decimal numbers, the terms aggregation will promote the non-decimal numbers to decimal numbers. +This can result in a loss of precision in the bucket values (see the sketch below). diff --git a/docs/reference/analysis/charfilters.asciidoc b/docs/reference/analysis/charfilters.asciidoc index cd24f5bf571..47390fe0ae8 100644 --- a/docs/reference/analysis/charfilters.asciidoc +++ b/docs/reference/analysis/charfilters.asciidoc @@ -6,8 +6,8 @@ is passed to the <<analysis-tokenizers,tokenizer>>. A character filter receives the original text as a stream of characters and can transform the stream by adding, removing, or changing characters. For -instance, a character filter could be used to convert Hindu-Arabic numerals -(٠‎١٢٣٤٥٦٧٨‎٩‎) into their Latin equivalents (0123456789), or to strip HTML +instance, a character filter could be used to convert Hindu-Arabic numerals +(٠‎١٢٣٤٥٦٧٨‎٩‎) into their Arabic-Latin equivalents (0123456789), or to strip HTML elements like `<b>` from the stream.
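To make the `Mixing field types` warning above concrete, here is a minimal sketch using the transport client's aggregation builders, in the spirit of the new `testMixLongAndDouble` test in this change. The index names `index_long` and `index_double`, the `number` field, and the connected `client` are illustrative assumptions, not part of the patch:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;

public final class MixedTypeTermsSketch {

    // "number" is mapped as long in index_long and as double in index_double;
    // when the per-index results are reduced, the terms aggregation promotes
    // every bucket key to a double, which can lose precision for large longs.
    public static void printBuckets(Client client) {
        SearchResponse response = client.prepareSearch("index_long", "index_double")
                .setSize(0)
                .addAggregation(AggregationBuilders.terms("values").field("number"))
                .get();
        Terms terms = response.getAggregations().get("values");
        for (Terms.Bucket bucket : terms.getBuckets()) {
            // keys come back as decimal strings, e.g. "10.0", even for
            // documents that live in the pure-long index
            System.out.println(bucket.getKeyAsString() + " -> " + bucket.getDocCount());
        }
    }
}
--------------------------------------------------

Run against either index alone, the aggregation keeps the original `long` or `double` key type; only the mixed case triggers the promotion to doubles.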
diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index ef0b8f37958..efec2efe1a4 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -118,6 +118,7 @@ The special characters used for date rounding must be URI encoded as follows: `|`:: `%7C` `+`:: `%2B` `:`:: `%3A` +`,`:: `%2C` ====================================================== The following example shows different forms of date math index names and the final index names diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index 972a918b145..f7d03341052 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -1,203 +1,289 @@ [[cluster-allocation-explain]] == Cluster Allocation Explain API -The cluster allocation explanation API is designed to assist in answering the -question "why is this shard unassigned?". To explain the allocation (on -unassigned state) of a shard, issue a request like: +The purpose of the cluster allocation explain API is to provide +explanations for shard allocations in the cluster. For unassigned shards, +the explain API provides an explanation for why the shard is unassigned. +For assigned shards, the explain API provides an explanation for why the +shard is remaining on its current node and has not moved or rebalanced to +another node. This API can be very useful when attempting to diagnose why +a shard is unassigned or why a shard continues to remain on its current node +when you might expect otherwise. -experimental[The cluster allocation explain API is new and should still be considered experimental. The API may change in ways that are not backwards compatible] +=== Explain API Request + +To explain the allocation of a shard, issue a request: [source,js] -------------------------------------------------- -$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain' -d'{ +$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain' -d '{ "index": "myindex", "shard": 0, - "primary": false + "primary": true }' -------------------------------------------------- Specify the `index` and `shard` id of the shard you would like an explanation -for, as well as the `primary` flag to indicate whether to explain a primary or -replica shard. +for, as well as the `primary` flag to indicate whether to explain the primary +shard for the given shard id or one of its replica shards. These three request +parameters are required. -The response looks like: +You may also specify an optional `current_node` request parameter to only explain +a shard that is currently located on `current_node`. The `current_node` can be +specified as either the node id or node name.
[source,js] -------------------------------------------------- -{ - "shard" : { - "index" : "myindex", - "index_uuid" : "KnW0-zELRs6PK84l0r38ZA", - "id" : 0, - "primary" : false - }, - "assigned" : false, <1> - "shard_state_fetch_pending": false, <2> - "unassigned_info" : { - "reason" : "INDEX_CREATED", <3> - "at" : "2016-03-22T20:04:23.620Z" - }, - "allocation_delay_ms" : 0, <4> - "remaining_delay_ms" : 0, <5> - "nodes" : { - "V-Spi0AyRZ6ZvKbaI3691w" : { - "node_name" : "H5dfFeA", - "node_attributes" : { <6> - "bar" : "baz" - }, - "store" : { - "shard_copy" : "NONE" <7> - }, - "final_decision" : "NO", <8> - "final_explanation" : "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision", - "weight" : 0.06666675, <9> - "decisions" : [ { <10> - "decider" : "filter", - "decision" : "NO", - "explanation" : "node does not match index include filters [foo:\"bar\"]" - } ] - }, - "Qc6VL8c5RWaw1qXZ0Rg57g" : { - "node_name" : "bGG90GE", - "node_attributes" : { - "bar" : "baz", - "foo" : "bar" - }, - "store" : { - "shard_copy" : "AVAILABLE" - }, - "final_decision" : "NO", - "final_explanation" : "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision", - "weight" : -1.3833332, - "decisions" : [ { - "decider" : "same_shard", - "decision" : "NO", - "explanation" : "the shard cannot be allocated on the same node id [Qc6VL8c5RWaw1qXZ0Rg57g] on which it already exists" - } ] - }, - "PzdyMZGXQdGhqTJHF_hGgA" : { - "node_name" : "DKDM97B", - "node_attributes" : { }, - "store" : { - "shard_copy" : "NONE" - }, - "final_decision" : "NO", - "final_explanation" : "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision", - "weight" : 2.3166666, - "decisions" : [ { - "decider" : "filter", - "decision" : "NO", - "explanation" : "node does not match index include filters [foo:\"bar\"]" - } ] - } - } -} +$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain' -d '{ + "index": "myindex", + "shard": 0, + "primary": false, + "current_node": "nodeA" <1> +}' -------------------------------------------------- -<1> Whether the shard is assigned or unassigned -<2> Whether information about the shard is still being fetched -<3> Reason for the shard originally becoming unassigned -<4> Configured delay before the shard can be allocated -<5> Remaining delay before the shard can be allocated -<6> User-added attributes the node has -<7> The shard copy information for this node and error (if applicable) -<8> Final decision and explanation of whether the shard can be allocated to this node -<9> Weight for how much the allocator would like to allocate the shard to this node -<10> List of node decisions factoring into final decision about the shard - -For a shard that is already assigned, the output looks similar to: - -[source,js] --------------------------------------------------- -{ - "shard" : { - "index" : "only-foo", - "index_uuid" : "KnW0-zELRs6PK84l0r38ZA", - "id" : 0, - "primary" : true - }, - "assigned" : true, - "assigned_node_id" : "Qc6VL8c5RWaw1qXZ0Rg57g", <1> - "shard_state_fetch_pending": false, - "allocation_delay_ms" : 0, - "remaining_delay_ms" : 0, - "nodes" : { - "V-Spi0AyRZ6ZvKbaI3691w" : { - "node_name" : "bGG90GE", - "node_attributes" : { - "bar" : "baz" - }, - "store" : { - "shard_copy" : "NONE" - }, - "final_decision" : "NO", - "final_explanation" : "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision", - "weight" : 1.4499999, - "decisions" : [ { - 
"decider" : "filter", - "decision" : "NO", - "explanation" : "node does not match index include filters [foo:\"bar\"]" - } ] - }, - "Qc6VL8c5RWaw1qXZ0Rg57g" : { - "node_name" : "I8hydUG", - "node_attributes" : { - "bar" : "baz", - "foo" : "bar" - }, - "store" : { - "shard_copy" : "AVAILABLE" - }, - "final_decision" : "ALREADY_ASSIGNED", <2> - "final_explanation" : "the shard is already assigned to this node", - "weight" : 0.0, - "decisions" : [ { - "decider" : "same_shard", - "decision" : "NO", - "explanation" : "the shard cannot be allocated on the same node id [Qc6VL8c5RWaw1qXZ0Rg57g] on which it already exists" - } ] - }, - "PzdyMZGXQdGhqTJHF_hGgA" : { - "node_name" : "H5dfFeA", - "node_attributes" : { }, - "store" : { - "shard_copy" : "NONE" - }, - "final_decision" : "NO", - "final_explanation" : "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision", - "weight" : 3.6999998, - "decisions" : [ { - "decider" : "filter", - "decision" : "NO", - "explanation" : "node does not match index include filters [foo:\"bar\"]" - } ] - } - } -} --------------------------------------------------- -<1> Node the shard is currently assigned to -<2> The decision is "ALREADY_ASSIGNED" because the shard is currently assigned to this node +<1> The node where shard 0 currently has a replica on You can also have Elasticsearch explain the allocation of the first unassigned -shard it finds by sending an empty body, such as: +shard that it finds by sending an empty body for the request: [source,js] -------------------------------------------------- $ curl -XGET 'http://localhost:9200/_cluster/allocation/explain' -------------------------------------------------- -If you would like to include all decisions that were factored into the final -decision, the `include_yes_decisions` parameter will return all decisions: +=== Explain API Response + +This section includes examples of the cluster allocation explain API response output +under various scenarios. 
+ +The API response for an unassigned shard: + +[source,js] +-------------------------------------------------- +{ + "index" : "idx", + "shard" : 0, + "primary" : true, + "current_state" : "unassigned", <1> + "unassigned_info" : { + "reason" : "INDEX_CREATED", <2> + "at" : "2017-01-04T18:08:16.600Z", + "last_allocation_status" : "no" + }, + "can_allocate" : "no", <3> + "allocate_explanation" : "cannot allocate because allocation is not permitted to any of the nodes", + "node_allocation_decisions" : [ + { + "node_id" : "8qt2rY-pT6KNZB3-hGfLnw", + "node_name" : "node_t1", + "transport_address" : "127.0.0.1:9401", + "node_decision" : "no", <4> + "weight_ranking" : 1, + "deciders" : [ + { + "decider" : "filter", <5> + "decision" : "NO", + "explanation" : "node does not match index setting [index.routing.allocation.include] filters [_name:\"non_existent_node\"]" <6> + } + ] + }, + { + "node_id" : "7Wr-QxLXRLKDxhzNm50pFA", + "node_name" : "node_t0", + "transport_address" : "127.0.0.1:9400", + "node_decision" : "no", + "weight_ranking" : 2, + "deciders" : [ + { + "decider" : "filter", + "decision" : "NO", + "explanation" : "node does not match index setting [index.routing.allocation.include] filters [_name:\"non_existent_node\"]" + } + ] + } + ] +} +-------------------------------------------------- +<1> The current state of the shard +<2> The reason for the shard originally becoming unassigned +<3> Whether to allocate the shard +<4> Whether to allocate the shard to the particular node +<5> The decider which led to the `no` decision for the node +<6> An explanation as to why the decider returned a `no` decision, with a helpful hint pointing to the setting that led to the decision + +You can return information gathered by the cluster info service about disk usage +and shard sizes by setting the `include_disk_info` parameter to `true`: + +[source,js] +-------------------------------------------------- +$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain?include_disk_info=true' +-------------------------------------------------- + +Additionally, if you would like to include all decisions that were factored into the final +decision, the `include_yes_decisions` parameter will return all decisions for each node: [source,js] -------------------------------------------------- $ curl -XGET 'http://localhost:9200/_cluster/allocation/explain?include_yes_decisions=true' -------------------------------------------------- +The default value for `include_yes_decisions` is `false`, which will only +include the `no` decisions in the response. This is generally what you would +want, as the `no` decisions indicate why a shard is unassigned or cannot be moved, +and including all decisions, the `yes` ones included, adds a lot of verbosity to the +API's response output.
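For readers driving the API from Java rather than curl, here is a rough sketch of the equivalent request built with the transport client's `prepareAllocationExplain()` builder. It assumes a connected `Client` instance, and the setter names mirror the REST parameters shown above; treat it as an illustration of the builder surface rather than a definitive recipe:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse;
import org.elasticsearch.client.Client;

public final class AllocationExplainSketch {

    // Ask why primary shard 0 of "myindex" is allocated the way it is,
    // with the most verbose output the API offers.
    public static ClusterAllocationExplainResponse explain(Client client) {
        return client.admin().cluster().prepareAllocationExplain()
                .setIndex("myindex")
                .setShard(0)
                .setPrimary(true)
                .setIncludeYesDecisions(true) // also report the "yes" decider results
                .setIncludeDiskInfo(true)     // attach disk usage and shard sizes
                .get();
    }
}
--------------------------------------------------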
+ +The API response output for an unassigned primary shard that had previously been +allocated to a node in the cluster: [source,js] -------------------------------------------------- -$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain?include_disk_info=true' +{ + "index" : "idx", + "shard" : 0, + "primary" : true, + "current_state" : "unassigned", + "unassigned_info" : { + "reason" : "NODE_LEFT", + "at" : "2017-01-04T18:03:28.464Z", + "details" : "node_left[OIWe8UhhThCK0V5XfmdrmQ]", + "last_allocation_status" : "no_valid_shard_copy" + }, + "can_allocate" : "no_valid_shard_copy", + "allocate_explanation" : "cannot allocate because a previous copy of the primary shard existed but can no longer be found on the nodes in the cluster" +} -------------------------------------------------- + +The API response output for a replica that is unassigned due to delayed allocation: + +[source,js] +-------------------------------------------------- +{ + "index" : "idx", + "shard" : 0, + "primary" : false, + "current_state" : "unassigned", + "unassigned_info" : { + "reason" : "NODE_LEFT", + "at" : "2017-01-04T18:53:59.498Z", + "details" : "node_left[G92ZwuuaRY-9n8_tc-IzEg]", + "last_allocation_status" : "no_attempt" + }, + "can_allocate" : "allocation_delayed", + "allocate_explanation" : "cannot allocate because the cluster is still waiting 59.8s for the departed node holding a replica to rejoin, despite being allowed to allocate the shard to at least one other node", + "configured_delay" : "1m", <1> + "configured_delay_in_millis" : 60000, + "remaining_delay" : "59.8s", <2> + "remaining_delay_in_millis" : 59824, + "node_allocation_decisions" : [ + { + "node_id" : "pmnHu_ooQWCPEFobZGbpWw", + "node_name" : "node_t2", + "transport_address" : "127.0.0.1:9402", + "node_decision" : "yes" + }, + { + "node_id" : "3sULLVJrRneSg0EfBB-2Ew", + "node_name" : "node_t0", + "transport_address" : "127.0.0.1:9400", + "node_decision" : "no", + "store" : { <3> + "matching_size" : "4.2kb", + "matching_size_in_bytes" : 4325 + }, + "deciders" : [ + { + "decider" : "same_shard", + "decision" : "NO", + "explanation" : "the shard cannot be allocated to the same node on which a copy of the shard already exists [[idx][0], node[3sULLVJrRneSg0EfBB-2Ew], [P], s[STARTED], a[id=eV9P8BN1QPqRc3B4PLx6cg]]" + } + ] + } + ] +} +-------------------------------------------------- +<1> The configured delay before allocating a replica shard that does not exist due to the node holding it leaving the cluster +<2> The remaining delay before allocating the replica shard +<3> Information about the shard data found on a node + +The API response output for an assigned shard that is not allowed to +remain on its current node and is required to move: + +[source,js] +-------------------------------------------------- +{ + "index" : "idx", + "shard" : 0, + "primary" : true, + "current_state" : "started", + "current_node" : { + "id" : "8lWJeJ7tSoui0bxrwuNhTA", + "name" : "node_t1", + "transport_address" : "127.0.0.1:9401" + }, + "can_remain_on_current_node" : "no", <1> + "can_remain_decisions" : [ <2> + { + "decider" : "filter", + "decision" : "NO", + "explanation" : "node does not match index setting [index.routing.allocation.include] filters [_name:\"non_existent_node\"]" + } + ], + "can_move_to_other_node" : "no", <3> + "move_explanation" : "cannot move shard to another node, even though it is not allowed to remain on its current node", + "node_allocation_decisions" : [ + { + "node_id" : "_P8olZS8Twax9u6ioN-GGA", + "node_name" : "node_t0", + 
"transport_address" : "127.0.0.1:9400", + "node_decision" : "no", + "weight_ranking" : 1, + "deciders" : [ + { + "decider" : "filter", + "decision" : "NO", + "explanation" : "node does not match index setting [index.routing.allocation.include] filters [_name:\"non_existent_node\"]" + } + ] + } + ] +} +-------------------------------------------------- +<1> Whether the shard is allowed to remain on its current node +<2> The deciders that factored into the decision of why the shard is not allowed to remain on its current node +<3> Whether the shard is allowed to be allocated to another node + +The API response output for an assigned shard that remains on its current node +because moving the shard to another node does not form a better cluster balance: + +[source,js] +-------------------------------------------------- +{ + "index" : "idx", + "shard" : 0, + "primary" : true, + "current_state" : "started", + "current_node" : { + "id" : "wLzJm4N4RymDkBYxwWoJsg", + "name" : "node_t0", + "transport_address" : "127.0.0.1:9400", + "weight_ranking" : 1 + }, + "can_remain_on_current_node" : "yes", + "can_rebalance_cluster" : "yes", <1> + "can_rebalance_to_other_node" : "no", <2> + "rebalance_explanation" : "cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance", + "node_allocation_decisions" : [ + { + "node_id" : "oE3EGFc8QN-Tdi5FFEprIA", + "node_name" : "node_t1", + "transport_address" : "127.0.0.1:9401", + "node_decision" : "worse_balance", <3> + "weight_ranking" : 1 + } + ] +} +-------------------------------------------------- +<1> Whether rebalancing is allowed on the cluster +<2> Whether the shard can be rebalanced to another node +<3> The reason the shard cannot be rebalanced to the node, in this case indicating that it offers no better balance than the current node diff --git a/docs/reference/docs.asciidoc b/docs/reference/docs.asciidoc index 04049663e84..0396b821ebf 100644 --- a/docs/reference/docs.asciidoc +++ b/docs/reference/docs.asciidoc @@ -4,7 +4,8 @@ [partintro] -- -This section describes the following CRUD APIs: +This section starts with a short introduction to Elasticsearch's <>, followed by a +detailed description of the following CRUD APIs: .Single document APIs * <> @@ -23,6 +24,8 @@ index name, or an `alias` which points to a single index. -- +include::docs/data-replication.asciidoc[] + include::docs/index_.asciidoc[] include::docs/get.asciidoc[] diff --git a/docs/reference/docs/data-replication.asciidoc b/docs/reference/docs/data-replication.asciidoc new file mode 100644 index 00000000000..47af258204f --- /dev/null +++ b/docs/reference/docs/data-replication.asciidoc @@ -0,0 +1,149 @@ + +[[docs-replication]] +== Reading and Writing documents + +[float] +=== Introduction + +Each index in Elasticsearch is <> +and each shard can have multiple copies. These copies are known as a _replication group_ and must be kept in sync when documents +are added or removed. If we fail to do so, reading from one copy will result in very different results than reading from another. +The process of keeping the shard copies in sync and serving reads from them is what we call the _data replication model_. + +Elasticsearch’s data replication model is based on the _primary-backup model_ and is described very well in the +https://www.microsoft.com/en-us/research/publication/pacifica-replication-in-log-based-distributed-storage-systems/[PacificA paper] of +Microsoft Research. 
That model is based on having a single copy from the replication group that acts as the primary shard.
+The other copies are called _replica shards_. The primary serves as the main entry point for all indexing operations. It is in charge of
+validating them and making sure they are correct. Once an index operation has been accepted by the primary, the primary is also
+responsible for replicating the operation to the other copies.
+
+The purpose of this section is to give a high level overview of the Elasticsearch replication model and discuss the implications
+it has for various interactions between write and read operations.
+
+[float]
+=== Basic write model
+
+Every indexing operation in Elasticsearch is first resolved to a replication group using <>,
+typically based on the document ID. Once the replication group has been determined,
+the operation is forwarded internally to the current _primary shard_ of the group. The primary shard is responsible
+for validating the operation and forwarding it to the other replicas. Since replicas can be offline, the primary
+is not required to replicate to all replicas. Instead, Elasticsearch maintains a list of shard copies that should
+receive the operation. This list is called the _in-sync copies_ and is maintained by the master node. As the name implies,
+these are the set of "good" shard copies that are guaranteed to have processed all of the index and delete operations that
+have been acknowledged to the user. The primary is responsible for maintaining this invariant and thus has to replicate all
+operations to each copy in this set.
+
+The primary shard follows this basic flow:
+
+. Validate the incoming operation and reject it if structurally invalid (Example: an object field where a number is expected)
+. Execute the operation locally, i.e. indexing or deleting the relevant document. This will also validate the content of fields
+  and reject the operation if needed (Example: a keyword value is too long for indexing in Lucene).
+. Forward the operation to each replica in the current in-sync copies set. If there are multiple replicas, this is done in parallel.
+. Once all replicas have successfully performed the operation and responded to the primary, the primary acknowledges the successful
+  completion of the request to the client.
+
+[float]
+==== Failure handling
+
+Many things can go wrong during indexing -- disks can get corrupted, nodes can be disconnected from each other, or some
+configuration mistake could cause an operation to fail on a replica despite it being successful on the primary. These
+are infrequent, but the primary has to respond to them.
+
+In the case that the primary itself fails, the node hosting the primary will send a message to the master about it. The indexing
+operation will wait (up to 1 minute, by <>) for the master to promote one of the replicas to be a
+new primary. The operation will then be forwarded to the new primary for processing. Note that the master also monitors the
+health of the nodes and may decide to proactively demote a primary. This typically happens when the node holding the primary
+is isolated from the cluster by a networking issue. See <> for more details.
+
+Once the operation has been successfully performed on the primary, the primary has to deal with potential failures
+when executing it on the replica shards. This may be caused by an actual failure on the replica or due to a network
+issue preventing the operation from reaching the replica (or preventing the replica from responding).
All of these
+share the same end result: a replica which is part of the in-sync replica set misses an operation that is about to
+be acknowledged. In order to avoid violating the invariant, the primary sends a message to the master requesting
+that the problematic shard be removed from the in-sync replica set. Only once removal of the shard has been acknowledged
+by the master does the primary acknowledge the operation. Note that the master will also instruct another node to start
+building a new shard copy in order to restore the system to a healthy state.
+
+[[demoted-primary]]
+While forwarding an operation to the replicas, the primary will use the replicas to validate that it is still the
+active primary. If the primary has been isolated due to a network partition (or a long GC), it may continue to process
+incoming indexing operations before realizing that it has been demoted. Operations that come from a stale primary
+will be rejected by the replicas. When the primary receives a response from the replica rejecting its request because
+it is no longer the primary, it will reach out to the master and learn that it has been replaced. The
+operation is then routed to the new primary.
+
+.What happens if there are no replicas?
+************
+This is a valid scenario that can happen due to index configuration or simply
+because all the replicas have failed. In that case, the primary is processing operations without any external validation,
+which may seem problematic. On the other hand, the primary cannot fail other shards on its own, but must request the master to do
+so on its behalf. This means that the master knows that the primary is the only good copy. We are therefore guaranteed
+that the master will not promote any other (out-of-date) shard copy to be a new primary and that any operation indexed
+into the primary will not be lost. Of course, since at that point we are running with only a single copy of the data, physical hardware
+issues can cause data loss. See <> for some mitigation options.
+************
+
+[float]
+=== Basic read model
+
+Reads in Elasticsearch can be lightweight lookups by ID or heavy search requests with complex aggregations that
+take non-trivial CPU power. One of the beauties of the primary-backup model is that it keeps all shard copies identical
+(with the exception of in-flight operations). As such, a single in-sync copy is sufficient to serve read requests.
+
+When a read request is received by a node, that node is responsible for forwarding it to the nodes that hold the relevant shards,
+collating the responses, and responding to the client. We call that node the _coordinating node_ for that request. The basic flow
+is as follows:
+
+. Resolve the read request to the relevant shards. Note that since most searches will be sent to one or more indices,
+  they typically need to read from multiple shards, each representing a different subset of the data.
+. Select an active copy of each relevant shard, from the shard replication group. This can be either the primary or
+  a replica. By default, Elasticsearch will simply round robin between the shard copies (see the example after this list).
+. Send shard level read requests to the selected copies.
+. Combine the results and respond. Note that in the case of a get-by-ID lookup, only one shard is relevant and this step can be skipped.
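+
+As a minimal sketch of the copy-selection step (assuming a hypothetical index
+named `idx`), the `preference` query parameter can override the default round
+robin; here `_primary` restricts the read to the primary copy:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/idx/_search?preference=_primary'
+--------------------------------------------------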
+
+[float]
+==== Failure handling
+
+When a shard fails to respond to a read request, the coordinating node will select another copy from the same replication group
+and send the shard level search request to that copy instead. Repetitive failures can result in no shard copies being available.
+In some cases, such as `_search`, Elasticsearch will prefer to respond fast, albeit with partial results, instead of waiting
+for the issue to be resolved (partial results are indicated in the `_shards` header of the response).
+
+[float]
+=== A few simple implications
+
+Each of these basic flows determines how Elasticsearch behaves as a system for both reads and writes. Furthermore, since read
+and write requests can be executed concurrently, these two basic flows interact with each other. This has a few inherent implications:
+
+Efficient reads:: Under normal operation each read operation is performed once for each relevant replication group.
+  Only under failure conditions do multiple copies of the same shard execute the same search.
+
+Read unacknowledged:: Since the primary first indexes locally and then replicates the request, it is possible for a
+  concurrent read to already see the change before it has been acknowledged.
+
+Two copies by default:: This model can be fault tolerant while maintaining only two copies of the data. This is in contrast to
+  quorum-based systems where the minimum number of copies for fault tolerance is 3.
+
+[float]
+=== Failures
+
+Under failures, the following is possible:
+
+A single shard can slow down indexing:: Because the primary waits for all replicas in the in-sync copies set during each operation,
+  a single slow shard can slow down the entire replication group. This is the price we pay for the read efficiency mentioned above.
+  Of course, a single slow shard will also slow down unlucky searches that have been routed to it.
+
+Dirty reads:: An isolated primary can expose writes that will not be acknowledged. This is caused by the fact that an isolated
+  primary will only realize that it is isolated once it sends requests to its replicas or when reaching out to the master.
+  At that point the operation is already indexed into the primary and can be read by a concurrent read. Elasticsearch mitigates
+  this risk by pinging the master every second (by default) and rejecting indexing operations if no master is known.
+
+[float]
+=== The Tip of the Iceberg
+
+This document provides a high level overview of how Elasticsearch deals with data. Of course, there is much, much more
+going on under the hood. Things like primary terms, cluster state publishing and master election all play a role in
+keeping this system behaving correctly. This document also doesn't cover known and important
+bugs (both closed and open). We recognize that https://github.com/elastic/elasticsearch/issues?q=label%3Aresiliency[GitHub is hard to keep up with].
+To help people stay on top of those, we maintain a dedicated https://www.elastic.co/guide/en/elasticsearch/resiliency/current/index.html[resiliency page]
+on our website. We strongly advise reading it.
diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc
index 7ccaf241ab3..bd75ce1694d 100644
--- a/docs/reference/docs/delete-by-query.asciidoc
+++ b/docs/reference/docs/delete-by-query.asciidoc
@@ -294,7 +294,7 @@ GET /_tasks/taskId:1
The advantage of this API is that it integrates with `wait_for_completion=false` to transparently return the status of completed tasks.
If the task is completed
-and `wait_for_completion=false` was set on it them it'll come back with a
+and `wait_for_completion=false` was set on it, then it'll come back with
`results` or an `error` field. The cost of this feature is the document that `wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to you to delete that document.
diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc
index 0d2b564d1ca..0f285ecb13b 100755
--- a/docs/reference/getting-started.asciidoc
+++ b/docs/reference/getting-started.asciidoc
@@ -67,6 +67,7 @@ A document is a basic unit of information that can be indexed. For example, you
Within an index/type, you can store as many documents as you want. Note that although a document physically resides in an index, a document actually must be indexed/assigned to a type inside an index.
+[[getting-started-shards-and-replicas]]
[float]
=== Shards & Replicas
@@ -91,7 +92,7 @@ Replication is important for two primary reasons:
To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards).
-The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may change the number of replicas dynamically anytime but you cannot change the number shards after-the-fact.
+The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may change the number of replicas dynamically anytime but you cannot change the number of shards after-the-fact.
By default, each index in Elasticsearch is allocated 5 primary shards and 1 replica which means that if you have at least two nodes in your cluster, your index will have 5 primary shards and another 5 replica shards (1 complete replica) for a total of 10 shards per index.
diff --git a/docs/reference/mapping/fields/parent-field.asciidoc b/docs/reference/mapping/fields/parent-field.asciidoc
index eddf9ef3a25..9197c7184ea 100644
--- a/docs/reference/mapping/fields/parent-field.asciidoc
+++ b/docs/reference/mapping/fields/parent-field.asciidoc
@@ -59,16 +59,18 @@ See the <> and the <> aggregation, and <> for more information.
-The value of the `_parent` field is accessible in queries, aggregations,
-and scripts:
+The value of the `_parent` field is accessible in aggregations
+and scripts, and may be queried with the
+<>:

[source,js]
--------------------------
GET my_index/_search
{
  "query": {
-    "terms": {
-      "_parent": [ "1" ] <1>
+    "parent_id": { <1>
+      "type": "my_child",
+      "id": "1"
    }
  },
  "aggs": {
@@ -82,7 +84,6 @@ GET my_index/_search
    "script_fields": {
      "parent": {
        "script": {
-          "lang": "painless",
          "inline": "doc['_parent']" <3>
        }
      }
@@ -92,7 +93,7 @@ GET my_index/_search
// CONSOLE
// TEST[continued]
-<1> Querying on the `_parent` field (also see the <> and the <>)
+<1> Querying the id of the `_parent` field (also see the <> and the <>)
<2> Aggregating on the `_parent` field (also see the <> aggregation)
<3> Accessing the `_parent` field in scripts
diff --git a/docs/reference/migration/migrate_6_0.asciidoc b/docs/reference/migration/migrate_6_0.asciidoc
index abc476a7d1b..2bfe5d88f2d 100644
--- a/docs/reference/migration/migrate_6_0.asciidoc
+++ b/docs/reference/migration/migrate_6_0.asciidoc
@@ -36,6 +36,7 @@ way to reindex old indices is to use the `reindex` API.
* <>
* <>
* <>
+* <>

include::migrate_6_0/cat.asciidoc[]
@@ -60,3 +61,5 @@ include::migrate_6_0/indices.asciidoc[]
include::migrate_6_0/scripting.asciidoc[]
include::migrate_6_0/ingest.asciidoc[]
+
+include::migrate_6_0/percolator.asciidoc[]
diff --git a/docs/reference/migration/migrate_6_0/percolator.asciidoc b/docs/reference/migration/migrate_6_0/percolator.asciidoc
new file mode 100644
index 00000000000..d31a1857ce9
--- /dev/null
+++ b/docs/reference/migration/migrate_6_0/percolator.asciidoc
@@ -0,0 +1,6 @@
+[[breaking_60_percolator_changes]]
+=== Percolator changes
+
+==== Deprecated percolator and mpercolate APIs have been removed
+
+Instead, the `percolate` query should be used via either the search or msearch APIs.
\ No newline at end of file
diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc
index 4e8b5c61efd..8f3a524dbdd 100644
--- a/docs/reference/modules/threadpool.asciidoc
+++ b/docs/reference/modules/threadpool.asciidoc
@@ -35,11 +35,6 @@ There are several thread pools, but the important ones include:
    queue_size of `50`. The maximum size for this pool
    is `1 + # of available processors`.
-`percolate`::
-    For percolate operations. Thread pool type is `fixed`
-    with a size of `# of available processors`,
-    queue_size of `1000`.
-
`snapshot`::
    For snapshot/restore operations. Thread pool type is `scaling` with a
    keep-alive of `5m` and a max of `min(5, (# of available processors)/2)`.
diff --git a/docs/reference/query-dsl/bool-query.asciidoc b/docs/reference/query-dsl/bool-query.asciidoc
index 54fedbee3f7..3649c21eab4 100644
--- a/docs/reference/query-dsl/bool-query.asciidoc
+++ b/docs/reference/query-dsl/bool-query.asciidoc
@@ -25,7 +25,9 @@ be set using the parameter.
|`must_not` |The clause (query) must not appear in the matching
-documents.
+documents. Clauses are executed in <>, meaning
+that scoring is ignored and clauses are considered for caching. Because scoring is
+ignored, a score of `0` for all documents is returned.
|=======================================================================

[IMPORTANT]
diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc
index f662dc825c0..c35f5111103 100644
--- a/docs/reference/query-dsl/parent-id-query.asciidoc
+++ b/docs/reference/query-dsl/parent-id-query.asciidoc
@@ -40,7 +40,7 @@ GET /my_index/_search
  "query": {
    "parent_id" : {
      "type" : "blog_tag",
-      "id" : "1"
+      "id" : "1"
    }
  }
}
diff --git a/docs/reference/query-dsl/simple-query-string-query.asciidoc b/docs/reference/query-dsl/simple-query-string-query.asciidoc
index c67ba5cd73e..7ef7ba60cc5 100644
--- a/docs/reference/query-dsl/simple-query-string-query.asciidoc
+++ b/docs/reference/query-dsl/simple-query-string-query.asciidoc
@@ -83,6 +83,30 @@ The `simple_query_string` supports the following special characters:
In order to search for any of these special characters, they will need to be
escaped with `\`.
+Be aware that this syntax may behave differently depending on the
+`default_operator` value. For example, consider the following query:
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+  "query": {
+    "simple_query_string" : {
+      "fields" : ["content"],
+      "query" : "foo bar -baz"
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+You may expect that documents containing only "foo" or "bar" will be returned,
+as long as they do not contain "baz". However, due to the `default_operator`
+being OR, this really means "match documents that contain "foo", or documents
+that contain "bar", or documents that don't contain "baz"". If this is unintended,
+then the query can be switched to `"foo bar +-baz"`, which will not return
+documents that contain "baz".
+
[float]
==== Default Field
When not explicitly specifying the field to search on in the query
diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc
index 7a26ee2cf60..a4adbc982f6 100644
--- a/docs/reference/search.asciidoc
+++ b/docs/reference/search.asciidoc
@@ -129,6 +129,4 @@ include::search/explain.asciidoc[]
include::search/profile.asciidoc[]
-include::search/percolate.asciidoc[]
-
include::search/field-stats.asciidoc[]
diff --git a/docs/reference/search/percolate.asciidoc b/docs/reference/search/percolate.asciidoc
deleted file mode 100644
index 755b5f7a24c..00000000000
--- a/docs/reference/search/percolate.asciidoc
+++ /dev/null
@@ -1,6 +0,0 @@
-[[search-percolate]]
-== Percolator
-
-deprecated[5.0.0,Percolate and multi percolate APIs are deprecated and have been replaced by the new <>]
-
-For indices created on or after version 5.0.0-alpha1 the percolator automatically indexes the query terms with the percolator queries. This allows the percolator to percolate documents more quickly. It is advisable to reindex any pre 5.0.0 indices to take advantage of this new optimization.
diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc
index 6ca635ce06e..65965fa1bd0 100644
--- a/docs/reference/setup/install/deb.asciidoc
+++ b/docs/reference/setup/install/deb.asciidoc
@@ -62,18 +62,23 @@ echo "deb https://artifacts.elastic.co/packages/{major-version}/apt stable main"
endif::[]
-[WARNING]
+[NOTE]
==================================================
-Do not use `add-apt-repository` as it will add a `deb-src` entry as well, but
-we do not provide a source package.
If you have added the `deb-src` entry, you -will see an error like the following: +These instructions do not use `add-apt-repository` for several reasons: + +. `add-apt-repository` adds entries to the system `/etc/apt/sources.list` file + rather than a clean per-repository file in `/etc/apt/sources.list.d` +. `add-apt-repository` is not part of the default install on many distributions + and requires a number of non-default dependencies. +. Older versions of `add-apt-repository` always add a `deb-src` entry which + will cause errors because we do not provide a source package. If you have added + the `deb-src` entry, you will see an error like the following until you delete + the `deb-src` line: Unable to find expected entry 'main/source/Sources' in Release file (Wrong sources.list entry or malformed file) -Delete the `deb-src` entry from the `/etc/apt/sources.list` file and the -installation should work as expected. ================================================== You can install the Elasticsearch Debian package with: diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 4f7f9647d2b..d5a82f7e1fa 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -248,7 +248,7 @@ In some environments, it may make more sense to prepare a custom image containin FROM docker.elastic.co/elasticsearch/elasticsearch:{version} ADD elasticsearch.yml /usr/share/elasticsearch/config/ USER root -chown elasticsearch:elasticsearch config/elasticsearch.yml +RUN chown elasticsearch:elasticsearch config/elasticsearch.yml USER elasticsearch -------------------------------------------- diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java index 95374069b14..40e227681c4 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.MultiValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -40,19 +39,18 @@ import java.util.Map; public class MatrixStatsAggregationBuilder extends MultiValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "matrix_stats"; - public static final Type TYPE = new Type(NAME); private MultiValueMode multiValueMode = MultiValueMode.AVG; public MatrixStatsAggregationBuilder(String name) { - super(name, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(name, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. 
*/ public MatrixStatsAggregationBuilder(StreamInput in) throws IOException { - super(in, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(in, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @Override @@ -72,7 +70,7 @@ public class MatrixStatsAggregationBuilder @Override protected MatrixStatsAggregatorFactory innerBuild(SearchContext context, Map> configs, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder) throws IOException { - return new MatrixStatsAggregatorFactory(name, type, configs, multiValueMode, context, parent, subFactoriesBuilder, metaData); + return new MatrixStatsAggregatorFactory(name, configs, multiValueMode, context, parent, subFactoriesBuilder, metaData); } @Override @@ -92,7 +90,7 @@ public class MatrixStatsAggregationBuilder } @Override - public String getWriteableName() { + public String getType() { return NAME; } } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java index 94b0b37dc36..c991e2c5c86 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java @@ -22,7 +22,6 @@ import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.MultiValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -38,11 +37,11 @@ public class MatrixStatsAggregatorFactory private final MultiValueMode multiValueMode; - public MatrixStatsAggregatorFactory(String name, InternalAggregation.Type type, + public MatrixStatsAggregatorFactory(String name, Map> configs, MultiValueMode multiValueMode, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, configs, context, parent, subFactoriesBuilder, metaData); + super(name, configs, context, parent, subFactoriesBuilder, metaData); this.multiValueMode = multiValueMode; } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index fde3d24792c..8ac189167b6 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -29,12 +29,11 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import 
org.elasticsearch.search.aggregations.AbstractAggregationBuilder; +import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -53,29 +52,29 @@ public abstract class MultiValuesSourceAggregationBuilder> extends MultiValuesSourceAggregationBuilder { - protected LeafOnly(String name, Type type, ValuesSourceType valuesSourceType, ValueType targetValueType) { - super(name, type, valuesSourceType, targetValueType); + protected LeafOnly(String name, ValuesSourceType valuesSourceType, ValueType targetValueType) { + super(name, valuesSourceType, targetValueType); } /** * Read from a stream that does not serialize its targetValueType. This should be used by most subclasses. */ - protected LeafOnly(StreamInput in, Type type, ValuesSourceType valuesSourceType, ValueType targetValueType) throws IOException { - super(in, type, valuesSourceType, targetValueType); + protected LeafOnly(StreamInput in, ValuesSourceType valuesSourceType, ValueType targetValueType) throws IOException { + super(in, valuesSourceType, targetValueType); } /** * Read an aggregation from a stream that serializes its targetValueType. This should only be used by subclasses that override * {@link #serializeTargetValueType()} to return true. */ - protected LeafOnly(StreamInput in, Type type, ValuesSourceType valuesSourceType) throws IOException { - super(in, type, valuesSourceType); + protected LeafOnly(StreamInput in, ValuesSourceType valuesSourceType) throws IOException { + super(in, valuesSourceType); } @Override public AB subAggregations(Builder subFactories) { throw new AggregationInitializationException("Aggregator [" + name + "] of type [" + - type + "] cannot accept sub-aggregations"); + getType() + "] cannot accept sub-aggregations"); } } @@ -87,8 +86,8 @@ public abstract class MultiValuesSourceAggregationBuilder missingMap = Collections.emptyMap(); - protected MultiValuesSourceAggregationBuilder(String name, Type type, ValuesSourceType valuesSourceType, ValueType targetValueType) { - super(name, type); + protected MultiValuesSourceAggregationBuilder(String name, ValuesSourceType valuesSourceType, ValueType targetValueType) { + super(name); if (valuesSourceType == null) { throw new IllegalArgumentException("[valuesSourceType] must not be null: [" + name + "]"); } @@ -96,17 +95,17 @@ public abstract class MultiValuesSourceAggregationBuilder config = new ValuesSourceConfig<>(ValuesSourceType.ANY); return config.format(resolveFormat(null, valueType)); } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregatorFactory.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregatorFactory.java index 23e44a5da17..7d5c56a571b 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregatorFactory.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregatorFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.support; import org.elasticsearch.search.aggregations.Aggregator; import 
org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.internal.SearchContext; @@ -36,10 +35,10 @@ public abstract class MultiValuesSourceAggregatorFactory> configs; - public MultiValuesSourceAggregatorFactory(String name, Type type, Map> configs, + public MultiValuesSourceAggregatorFactory(String name, Map> configs, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, type, context, parent, subFactoriesBuilder, metaData); + super(name, context, parent, subFactoriesBuilder, metaData); this.configs = configs; } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java index 192cdc6a463..f88ecc981a8 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java @@ -36,7 +36,6 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestStatusToXContentListener; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.SearchRequestParsers; import java.io.IOException; @@ -75,12 +74,9 @@ public class RestSearchTemplateAction extends BaseRestHandler { }, new ParseField("inline", "template"), ObjectParser.ValueType.OBJECT_OR_STRING); } - private final SearchRequestParsers searchRequestParsers; - @Inject - public RestSearchTemplateAction(Settings settings, RestController controller, SearchRequestParsers searchRequestParsers) { + public RestSearchTemplateAction(Settings settings, RestController controller) { super(settings); - this.searchRequestParsers = searchRequestParsers; controller.registerHandler(GET, "/_search/template", this); controller.registerHandler(POST, "/_search/template", this); @@ -98,7 +94,7 @@ public class RestSearchTemplateAction extends BaseRestHandler { // Creates the search request with all required params SearchRequest searchRequest = new SearchRequest(); - RestSearchAction.parseSearchRequest(searchRequest, request, searchRequestParsers, parseFieldMatcher, null); + RestSearchAction.parseSearchRequest(searchRequest, request, parseFieldMatcher, null); // Creates the search template request SearchTemplateRequest searchTemplateRequest; diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java index 9d1071f62e5..683f4ccf02e 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java @@ -36,7 +36,6 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.search.builder.SearchSourceBuilder; 
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -51,19 +50,17 @@ public class TransportSearchTemplateAction extends HandledTransportAction { - - public static final MultiPercolateAction INSTANCE = new MultiPercolateAction(); - public static final String NAME = "indices:data/read/mpercolate"; - - private MultiPercolateAction() { - super(NAME); - } - - @Override - public MultiPercolateResponse newResponse() { - return new MultiPercolateResponse(); - } - - @Override - public MultiPercolateRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new MultiPercolateRequestBuilder(client, this); - } - -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java deleted file mode 100644 index ad0058b7b2d..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.percolator; - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.CompositeIndicesRequest; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContent; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.action.ValidateActions.addValidationError; -import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue; -import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; - -/** - * A multi percolate request that encapsulates multiple {@link PercolateRequest} instances in a single api call. 
- * - * @deprecated Instead use multi search API with {@link PercolateQueryBuilder} - */ -@Deprecated -public class MultiPercolateRequest extends ActionRequest implements CompositeIndicesRequest { - - private String[] indices; - private String documentType; - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); - private List requests = new ArrayList<>(); - - /** - * Embeds a percolate request to this multi percolate request - */ - public MultiPercolateRequest add(PercolateRequestBuilder requestBuilder) { - return add(requestBuilder.request()); - } - - /** - * Embeds a percolate request to this multi percolate request - */ - public MultiPercolateRequest add(PercolateRequest request) { - if (request.indices() == null && indices != null) { - request.indices(indices); - } - if (request.documentType() == null && documentType != null) { - request.documentType(documentType); - } - if (request.indicesOptions() == IndicesOptions.strictExpandOpenAndForbidClosed() && indicesOptions != IndicesOptions.strictExpandOpenAndForbidClosed()) { - request.indicesOptions(indicesOptions); - } - requests.add(request); - return this; - } - - /** - * Embeds a percolate request which request body is defined as raw bytes to this multi percolate request - */ - public MultiPercolateRequest add(byte[] data, int from, int length) throws Exception { - return add(new BytesArray(data, from, length), true); - } - - /** - * Embeds a percolate request which request body is defined as raw bytes to this multi percolate request - */ - public MultiPercolateRequest add(BytesReference data, boolean allowExplicitIndex) throws IOException { - XContent xContent = XContentFactory.xContent(data); - int from = 0; - int length = data.length(); - byte marker = xContent.streamSeparator(); - while (true) { - int nextMarker = findNextMarker(marker, from, data, length); - if (nextMarker == -1) { - break; - } - // support first line with \n - if (nextMarker == 0) { - from = nextMarker + 1; - continue; - } - - PercolateRequest percolateRequest = new PercolateRequest(); - if (indices != null) { - percolateRequest.indices(indices); - } - if (documentType != null) { - percolateRequest.documentType(documentType); - } - if (indicesOptions != IndicesOptions.strictExpandOpenAndForbidClosed()) { - percolateRequest.indicesOptions(indicesOptions); - } - - // now parse the action - if (nextMarker - from > 0) { - // EMPTY is safe here because we don't call namedObject - try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, data.slice(from, nextMarker - from))) { - // Move to START_OBJECT, if token is null, its an empty data - XContentParser.Token token = parser.nextToken(); - if (token != null) { - // Top level json object - assert token == XContentParser.Token.START_OBJECT; - token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME) { - throw new ElasticsearchParseException("Expected field"); - } - token = parser.nextToken(); - if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("expected start object"); - } - String percolateAction = parser.currentName(); - if ("percolate".equals(percolateAction)) { - parsePercolateAction(parser, percolateRequest, allowExplicitIndex); - } else if ("count".equals(percolateAction)) { - percolateRequest.onlyCount(true); - parsePercolateAction(parser, percolateRequest, allowExplicitIndex); - } else { - throw new ElasticsearchParseException("[{}] isn't a supported percolate operation", percolateAction); - } - 
} - } - } - - // move pointers - from = nextMarker + 1; - - // now for the body - nextMarker = findNextMarker(marker, from, data, length); - if (nextMarker == -1) { - break; - } - - percolateRequest.source(data.slice(from, nextMarker - from)); - // move pointers - from = nextMarker + 1; - - add(percolateRequest); - } - - return this; - } - - private void parsePercolateAction(XContentParser parser, PercolateRequest percolateRequest, boolean allowExplicitIndex) throws IOException { - String globalIndex = indices != null && indices.length > 0 ? indices[0] : null; - - Map header = parser.map(); - - if (header.containsKey("id")) { - GetRequest getRequest = new GetRequest(globalIndex); - percolateRequest.getRequest(getRequest); - for (Map.Entry entry : header.entrySet()) { - Object value = entry.getValue(); - if ("id".equals(entry.getKey())) { - getRequest.id(nodeStringValue(value, null)); - header.put("id", entry.getValue()); - } else if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) { - if (!allowExplicitIndex) { - throw new IllegalArgumentException("explicit index in multi percolate is not allowed"); - } - getRequest.index(nodeStringValue(value, null)); - } else if ("type".equals(entry.getKey())) { - getRequest.type(nodeStringValue(value, null)); - } else if ("preference".equals(entry.getKey())) { - getRequest.preference(nodeStringValue(value, null)); - } else if ("routing".equals(entry.getKey())) { - getRequest.routing(nodeStringValue(value, null)); - } else if ("percolate_index".equals(entry.getKey()) || "percolate_indices".equals(entry.getKey()) || "percolateIndex".equals(entry.getKey()) || "percolateIndices".equals(entry.getKey())) { - percolateRequest.indices(nodeStringArrayValue(value)); - } else if ("percolate_type".equals(entry.getKey()) || "percolateType".equals(entry.getKey())) { - percolateRequest.documentType(nodeStringValue(value, null)); - } else if ("percolate_preference".equals(entry.getKey()) || "percolatePreference".equals(entry.getKey())) { - percolateRequest.preference(nodeStringValue(value, null)); - } else if ("percolate_routing".equals(entry.getKey()) || "percolateRouting".equals(entry.getKey())) { - percolateRequest.routing(nodeStringValue(value, null)); - } - } - - // Setting values based on get request, if needed... 
- if ((percolateRequest.indices() == null || percolateRequest.indices().length == 0) && getRequest.index() != null) { - percolateRequest.indices(getRequest.index()); - } - if (percolateRequest.documentType() == null && getRequest.type() != null) { - percolateRequest.documentType(getRequest.type()); - } - if (percolateRequest.routing() == null && getRequest.routing() != null) { - percolateRequest.routing(getRequest.routing()); - } - if (percolateRequest.preference() == null && getRequest.preference() != null) { - percolateRequest.preference(getRequest.preference()); - } - } else { - for (Map.Entry entry : header.entrySet()) { - Object value = entry.getValue(); - if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) { - if (!allowExplicitIndex) { - throw new IllegalArgumentException("explicit index in multi percolate is not allowed"); - } - percolateRequest.indices(nodeStringArrayValue(value)); - } else if ("type".equals(entry.getKey())) { - percolateRequest.documentType(nodeStringValue(value, null)); - } else if ("preference".equals(entry.getKey())) { - percolateRequest.preference(nodeStringValue(value, null)); - } else if ("routing".equals(entry.getKey())) { - percolateRequest.routing(nodeStringValue(value, null)); - } - } - } - percolateRequest.indicesOptions(IndicesOptions.fromMap(header, indicesOptions)); - } - - private int findNextMarker(byte marker, int from, BytesReference data, int length) { - for (int i = from; i < length; i++) { - if (data.get(i) == marker) { - return i; - } - } - return -1; - } - - /** - * @return The list of already set percolate requests. - */ - public List requests() { - return this.requests; - } - - /** - * @return Returns the {@link IndicesOptions} that is used as default for all percolate requests. - */ - public IndicesOptions indicesOptions() { - return indicesOptions; - } - - /** - * Sets the {@link IndicesOptions} for all percolate request that don't have this set. - * - * Warning: This should be set before adding any percolate requests. Setting this after adding percolate requests - * will have no effect on any percolate requests already added. - */ - public MultiPercolateRequest indicesOptions(IndicesOptions indicesOptions) { - this.indicesOptions = indicesOptions; - return this; - } - - /** - * @return The default indices for all percolate request. - */ - public String[] indices() { - return indices; - } - - /** - * Sets the default indices for any percolate request that doesn't have indices defined. - * - * Warning: This should be set before adding any percolate requests. Setting this after adding percolate requests - * will have no effect on any percolate requests already added. - */ - public MultiPercolateRequest indices(String... indices) { - this.indices = indices; - return this; - } - - /** - * @return Sets the default type for all percolate requests - */ - public String documentType() { - return documentType; - } - - /** - * Sets the default document type for any percolate request that doesn't have a document type set. - * - * Warning: This should be set before adding any percolate requests. Setting this after adding percolate requests - * will have no effect on any percolate requests already added. 
- */ - public MultiPercolateRequest documentType(String type) { - this.documentType = type; - return this; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (requests.isEmpty()) { - validationException = addValidationError("no requests added", validationException); - } - for (int i = 0; i < requests.size(); i++) { - ActionRequestValidationException ex = requests.get(i).validate(); - if (ex != null) { - if (validationException == null) { - validationException = new ActionRequestValidationException(); - } - validationException.addValidationErrors(ex.validationErrors()); - } - } - - return validationException; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - documentType = in.readOptionalString(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - PercolateRequest request = new PercolateRequest(); - request.readFrom(in); - requests.add(request); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArrayNullable(indices); - out.writeOptionalString(documentType); - indicesOptions.writeIndicesOptions(out); - out.writeVInt(requests.size()); - for (PercolateRequest request : requests) { - request.writeTo(out); - } - } -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequestBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequestBuilder.java deleted file mode 100644 index 8613b8b07bd..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequestBuilder.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.percolator; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.ElasticsearchClient; - -/** - * A builder for to ease the use of defining a {@link MultiPercolateRequest} instance. - * - * @deprecated Instead use multi search API with {@link PercolateQueryBuilder} - */ -@Deprecated -public class MultiPercolateRequestBuilder extends ActionRequestBuilder { - - public MultiPercolateRequestBuilder(ElasticsearchClient client, MultiPercolateAction action) { - super(client, action, new MultiPercolateRequest()); - } - - /** - * Bundles the specified percolate request to the multi percolate request. 
- */ - public MultiPercolateRequestBuilder add(PercolateRequest percolateRequest) { - request.add(percolateRequest); - return this; - } - - /** - * Bundles the specified percolate request build to the multi percolate request. - */ - public MultiPercolateRequestBuilder add(PercolateRequestBuilder percolateRequestBuilder) { - request.add(percolateRequestBuilder); - return this; - } - - /** - * Specifies how to globally ignore indices that are not available and how to deal with wildcard indices expressions. - *

- * Invoke this method before invoking {@link #add(PercolateRequestBuilder)}. - */ - public MultiPercolateRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { - request.indicesOptions(indicesOptions); - return this; - } -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateResponse.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateResponse.java deleted file mode 100644 index 86ca3b0046d..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateResponse.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.percolator; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Iterator; - -/** - * Represents the response of a multi percolate request. - * - * Each item represents the response of a percolator request and the order of the items is in the same order as the - * percolator requests were defined in the multi percolate request. - * - * @deprecated Instead use multi search API with {@link PercolateQueryBuilder} - */ -@Deprecated -public class MultiPercolateResponse extends ActionResponse implements Iterable, ToXContentObject { - - private Item[] items; - - MultiPercolateResponse(Item[] items) { - this.items = items; - } - - MultiPercolateResponse() { - this.items = new Item[0]; - } - - @Override - public Iterator iterator() { - return Arrays.stream(items).iterator(); - } - - /** - * Same as {@link #getItems()} - */ - public Item[] items() { - return items; - } - - /** - * @return the percolate responses as items. 
- */ - public Item[] getItems() { - return items; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.startArray(Fields.RESPONSES); - for (MultiPercolateResponse.Item item : items) { - if (item.isFailure()) { - builder.startObject(); - ElasticsearchException.renderException(builder, params, item.getFailure()); - builder.endObject(); - } else { - item.getResponse().toXContent(builder, params); - } - } - builder.endArray(); - builder.endObject(); - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(items.length); - for (Item item : items) { - item.writeTo(out); - } - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - items = new Item[size]; - for (int i = 0; i < items.length; i++) { - items[i] = new Item(); - items[i].readFrom(in); - } - } - - /** - * Encapsulates a single percolator response which may contain an error or the actual percolator response itself. - */ - public static class Item implements Streamable { - - private PercolateResponse response; - private Exception exception; - - Item(PercolateResponse response) { - this.response = response; - } - - Item(Exception exception) { - this.exception = exception; - } - - Item() { - } - - - /** - * @return The percolator response or null if there was error. - */ - @Nullable - public PercolateResponse getResponse() { - return response; - } - - /** - * @return An error description if there was an error or null if the percolate request was successful - */ - @Nullable - public String getErrorMessage() { - return exception == null ? null : exception.getMessage(); - } - - /** - * @return true if the percolator request that this item represents failed otherwise - * false is returned. - */ - public boolean isFailure() { - return exception != null; - } - - public Exception getFailure() { - return exception; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - if (in.readBoolean()) { - response = new PercolateResponse(); - response.readFrom(in); - } else { - exception = in.readException(); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - if (response != null) { - out.writeBoolean(true); - response.writeTo(out); - } else { - out.writeBoolean(false); - out.writeException(exception); - } - } - } - - static final class Fields { - static final String RESPONSES = "responses"; - static final String ERROR = "error"; - } - -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateAction.java deleted file mode 100644 index cebca9ed825..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateAction.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.percolator; - -import org.elasticsearch.action.Action; -import org.elasticsearch.client.ElasticsearchClient; - -@Deprecated -public class PercolateAction extends Action { - - public static final PercolateAction INSTANCE = new PercolateAction(); - public static final String NAME = "indices:data/read/percolate"; - - private PercolateAction() { - super(NAME); - } - - @Override - public PercolateResponse newResponse() { - return new PercolateResponse(); - } - - @Override - public PercolateRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new PercolateRequestBuilder(client, this); - } -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java deleted file mode 100644 index bc449ea932d..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.percolator; - -import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; - -import java.io.IOException; -import java.util.Map; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -/** - * A request to execute a percolate operation. 
- * - * @deprecated Instead use search API with {@link PercolateQueryBuilder} - */ -@Deprecated -public class PercolateRequest extends ActionRequest implements IndicesRequest.Replaceable { - - protected String[] indices; - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); - private String documentType; - private String routing; - private String preference; - private boolean onlyCount; - - private GetRequest getRequest; - private BytesReference source; - - public String[] indices() { - return indices; - } - - public final PercolateRequest indices(String... indices) { - this.indices = indices; - return this; - } - - public IndicesOptions indicesOptions() { - return indicesOptions; - } - - public PercolateRequest indicesOptions(IndicesOptions indicesOptions) { - this.indicesOptions = indicesOptions; - return this; - } - - - /** - * Getter for {@link #documentType(String)} - */ - public String documentType() { - return documentType; - } - - /** - * Sets the type of the document to percolate. This is important as it selects the mapping to be used to parse - * the document. - */ - public PercolateRequest documentType(String type) { - this.documentType = type; - return this; - } - - /** - * Getter for {@link #routing(String)} - */ - public String routing() { - return routing; - } - - /** - * A comma separated list of routing values to control the shards the search will be executed on. - */ - public PercolateRequest routing(String routing) { - this.routing = routing; - return this; - } - - /** - * Getter for {@link #preference(String)} - */ - public String preference() { - return preference; - } - - /** - * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to - * _local to prefer local shards, _primary to execute only on primary shards, or - * a custom value, which guarantees that the same order will be used across different requests. - */ - public PercolateRequest preference(String preference) { - this.preference = preference; - return this; - } - - /** - * Getter for {@link #getRequest(GetRequest)} - */ - public GetRequest getRequest() { - return getRequest; - } - - /** - * This defines where to fetch the document to be percolated from, which is an alternative of defining the document - * to percolate in the request body. - * - * If this defined than this will override the document specified in the request body. - */ - public PercolateRequest getRequest(GetRequest getRequest) { - this.getRequest = getRequest; - return this; - } - - /** - * @return The request body in its raw form. 
- */ - public BytesReference source() { - return source; - } - - /** - * Raw version of {@link #source(PercolateSourceBuilder)} - */ - public PercolateRequest source(Map document) throws ElasticsearchGenerationException { - return source(document, Requests.CONTENT_TYPE); - } - - /** - * Raw version of {@link #source(PercolateSourceBuilder)} - */ - @SuppressWarnings("unchecked") - public PercolateRequest source(Map document, XContentType contentType) throws ElasticsearchGenerationException { - try { - XContentBuilder builder = XContentFactory.contentBuilder(contentType); - builder.map(document); - return source(builder); - } catch (IOException e) { - throw new ElasticsearchGenerationException("Failed to generate [" + document + "]", e); - } - } - - /** - * Raw version of {@link #source(PercolateSourceBuilder)} - */ - public PercolateRequest source(String document) { - this.source = new BytesArray(document); - return this; - } - - /** - * Raw version of {@link #source(PercolateSourceBuilder)} - */ - public PercolateRequest source(XContentBuilder documentBuilder) { - source = documentBuilder.bytes(); - return this; - } - - /** - * Raw version of {@link #source(PercolateSourceBuilder)} - */ - public PercolateRequest source(byte[] document) { - return source(document, 0, document.length); - } - - /** - * Raw version of {@link #source(PercolateSourceBuilder)} - */ - public PercolateRequest source(byte[] source, int offset, int length) { - return source(new BytesArray(source, offset, length)); - } - - /** - * Raw version of {@link #source(PercolateSourceBuilder)} - */ - public PercolateRequest source(BytesReference source) { - this.source = source; - return this; - } - - /** - * Sets the request body definition for this percolate request as raw bytes. - * - * This is the preferred way to set the request body. - */ - public PercolateRequest source(PercolateSourceBuilder sourceBuilder) { - this.source = sourceBuilder.buildAsBytes(Requests.CONTENT_TYPE); - return this; - } - - /** - * Getter for {@link #onlyCount(boolean)} - */ - public boolean onlyCount() { - return onlyCount; - } - - /** - * Sets whether this percolate request should only count the number of percolator queries that matches with - * the document being percolated and don't keep track of the actual queries that have matched. 
- */ - public PercolateRequest onlyCount(boolean onlyCount) { - this.onlyCount = onlyCount; - return this; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (documentType == null) { - validationException = addValidationError("type is missing", validationException); - } - if (source == null && getRequest == null) { - validationException = addValidationError("source or get is missing", validationException); - } - if (getRequest != null && getRequest.storedFields() != null) { - validationException = addValidationError("get stored fields option isn't supported via percolate request", validationException); - } - return validationException; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - documentType = in.readString(); - routing = in.readOptionalString(); - preference = in.readOptionalString(); - source = in.readBytesReference(); - if (in.readBoolean()) { - getRequest = new GetRequest(); - getRequest.readFrom(in); - } - onlyCount = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArrayNullable(indices); - indicesOptions.writeIndicesOptions(out); - out.writeString(documentType); - out.writeOptionalString(routing); - out.writeOptionalString(preference); - out.writeBytesReference(source); - if (getRequest != null) { - out.writeBoolean(true); - getRequest.writeTo(out); - } else { - out.writeBoolean(false); - } - out.writeBoolean(onlyCount); - } -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequestBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequestBuilder.java deleted file mode 100644 index e73a3267fee..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequestBuilder.java +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-package org.elasticsearch.percolator;
-
-import org.elasticsearch.action.ActionRequestBuilder;
-import org.elasticsearch.action.get.GetRequest;
-import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.client.ElasticsearchClient;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.search.aggregations.AggregationBuilder;
-import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
-import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
-import org.elasticsearch.search.sort.SortBuilder;
-
-import java.util.Map;
-
-/**
- * A builder to ease defining a percolate request.
- *
- * @deprecated Instead use search API with {@link PercolateQueryBuilder}
- */
-@Deprecated
-public class PercolateRequestBuilder extends ActionRequestBuilder<PercolateRequest, PercolateResponse, PercolateRequestBuilder> {
-
-    private PercolateSourceBuilder sourceBuilder;
-
-    public PercolateRequestBuilder(ElasticsearchClient client, PercolateAction action) {
-        super(client, action, new PercolateRequest());
-    }
-
-    public PercolateRequestBuilder setIndices(String... indices) {
-        request.indices(indices);
-        return this;
-    }
-
-    public PercolateRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
-        request.indicesOptions(indicesOptions);
-        return this;
-    }
-
-    /**
-     * Sets the type of the document to percolate. This is important as it selects the mapping to be used to parse
-     * the document.
-     */
-    public PercolateRequestBuilder setDocumentType(String type) {
-        request.documentType(type);
-        return this;
-    }
-
-    /**
-     * A comma separated list of routing values to control the shards the search will be executed on.
-     */
-    public PercolateRequestBuilder setRouting(String routing) {
-        request.routing(routing);
-        return this;
-    }
-
-    /**
-     * List of routing values to control the shards the search will be executed on.
-     */
-    public PercolateRequestBuilder setRouting(String... routings) {
-        request.routing(Strings.arrayToCommaDelimitedString(routings));
-        return this;
-    }
-
-    /**
-     * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
-     * _local to prefer local shards, _primary to execute only on primary shards, or
-     * a custom value, which guarantees that the same order will be used across different requests.
-     */
-    public PercolateRequestBuilder setPreference(String preference) {
-        request.preference(preference);
-        return this;
-    }
-
-    /**
-     * Enables percolating an existing document. Instead of specifying the source of the document to percolate, define
-     * a get request that will fetch a document and use its source.
-     */
-    public PercolateRequestBuilder setGetRequest(GetRequest getRequest) {
-        request.getRequest(getRequest);
-        return this;
-    }
-
-    /**
-     * Whether to only return the total count and not keep track of the matches (count percolation).
- */ - public PercolateRequestBuilder setOnlyCount(boolean onlyCount) { - request.onlyCount(onlyCount); - return this; - } - - /** - * Delegates to {@link PercolateSourceBuilder#setSize(int)}} - */ - public PercolateRequestBuilder setSize(int size) { - sourceBuilder().setSize(size); - return this; - } - - /** - * Delegates to {@link PercolateSourceBuilder#setSort(boolean)}} - */ - public PercolateRequestBuilder setSortByScore(boolean sort) { - sourceBuilder().setSort(sort); - return this; - } - - /** - * Delegates to {@link PercolateSourceBuilder#addSort(SortBuilder)} - */ - public PercolateRequestBuilder addSort(SortBuilder sort) { - sourceBuilder().addSort(sort); - return this; - } - - /** - * Delegates to {@link PercolateSourceBuilder#setSort(boolean)}} - */ - public PercolateRequestBuilder setScore(boolean score) { - sourceBuilder().setTrackScores(score); - return this; - } - - /** - * Delegates to {@link PercolateSourceBuilder#setDoc(PercolateSourceBuilder.DocBuilder)} - */ - public PercolateRequestBuilder setPercolateDoc(PercolateSourceBuilder.DocBuilder docBuilder) { - sourceBuilder().setDoc(docBuilder); - return this; - } - - /** - * Delegates to {@link PercolateSourceBuilder#setQueryBuilder(QueryBuilder)} - */ - public PercolateRequestBuilder setPercolateQuery(QueryBuilder queryBuilder) { - sourceBuilder().setQueryBuilder(queryBuilder); - return this; - } - - /** - * Delegates to {@link PercolateSourceBuilder#setHighlightBuilder(HighlightBuilder)} - */ - public PercolateRequestBuilder setHighlightBuilder(HighlightBuilder highlightBuilder) { - sourceBuilder().setHighlightBuilder(highlightBuilder); - return this; - } - - /** - * Delegates to - * {@link PercolateSourceBuilder#addAggregation(AggregationBuilder)} - */ - public PercolateRequestBuilder addAggregation(AggregationBuilder aggregationBuilder) { - sourceBuilder().addAggregation(aggregationBuilder); - return this; - } - - /** - * Delegates to - * {@link PercolateSourceBuilder#addAggregation(PipelineAggregationBuilder)} - */ - public PercolateRequestBuilder addAggregation(PipelineAggregationBuilder aggregationBuilder) { - sourceBuilder().addAggregation(aggregationBuilder); - return this; - } - - /** - * Sets the percolate request definition directly on the request. This will - * overwrite any definitions set by any of the delegate methods. 
- */ - public PercolateRequestBuilder setSource(PercolateSourceBuilder source) { - sourceBuilder = source; - return this; - } - - /** - * Raw variant of {@link #setSource(PercolateSourceBuilder)} - */ - public PercolateRequestBuilder setSource(Map source) { - request.source(source); - return this; - } - - /** - * Raw variant of {@link #setSource(PercolateSourceBuilder)} - */ - public PercolateRequestBuilder setSource(Map source, XContentType contentType) { - request.source(source, contentType); - return this; - } - - /** - * Raw variant of {@link #setSource(PercolateSourceBuilder)} - */ - public PercolateRequestBuilder setSource(String source) { - request.source(source); - return this; - } - - /** - * Raw variant of {@link #setSource(PercolateSourceBuilder)} - */ - public PercolateRequestBuilder setSource(XContentBuilder sourceBuilder) { - request.source(sourceBuilder); - return this; - } - - /** - * Raw variant of {@link #setSource(PercolateSourceBuilder)} - */ - public PercolateRequestBuilder setSource(BytesReference source) { - request.source(source); - return this; - } - - /** - * Raw variant of {@link #setSource(PercolateSourceBuilder)} - */ - public PercolateRequestBuilder setSource(byte[] source) { - request.source(source); - return this; - } - - /** - * Raw variant of {@link #setSource(PercolateSourceBuilder)} - */ - public PercolateRequestBuilder setSource(byte[] source, int offset, int length) { - request.source(source, offset, length); - return this; - } - - private PercolateSourceBuilder sourceBuilder() { - if (sourceBuilder == null) { - sourceBuilder = new PercolateSourceBuilder(); - } - return sourceBuilder; - } - - @Override - public PercolateRequest request() { - if (sourceBuilder != null) { - request.source(sourceBuilder); - } - return request; - } - - @Override - protected PercolateRequest beforeExecute(PercolateRequest request) { - if (sourceBuilder != null) { - request.source(sourceBuilder); - } - return request; - } -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateResponse.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateResponse.java deleted file mode 100644 index 7e25af7ea6e..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateResponse.java +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-package org.elasticsearch.percolator;
-
-import org.elasticsearch.action.ShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.BroadcastResponse;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.text.Text;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.ToXContentObject;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.rest.action.RestActions;
-import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Encapsulates the response of a percolator request.
- *
- * @deprecated Instead use search API with {@link PercolateQueryBuilder}
- */
-@Deprecated
-public class PercolateResponse extends BroadcastResponse implements Iterable<PercolateResponse.Match>, ToXContentObject {
-
-    public static final Match[] EMPTY = new Match[0];
-    // PercolateQuery emits this score if no 'query' is defined in the percolate request
-    public static final float NO_SCORE = 0.0f;
-
-    private long tookInMillis;
-    private Match[] matches;
-    private long count;
-    private InternalAggregations aggregations;
-
-    PercolateResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures,
-                      Match[] matches, long count, long tookInMillis, InternalAggregations aggregations) {
-        super(totalShards, successfulShards, failedShards, shardFailures);
-        if (tookInMillis < 0) {
-            throw new IllegalArgumentException("tookInMillis must be positive but was: " + tookInMillis);
-        }
-        this.tookInMillis = tookInMillis;
-        this.matches = matches;
-        this.count = count;
-        this.aggregations = aggregations;
-    }
-
-    PercolateResponse() {
-    }
-
-    /**
-     * How long the percolate took.
-     */
-    public TimeValue getTook() {
-        return new TimeValue(tookInMillis);
-    }
-
-    /**
-     * How long the percolate took in milliseconds.
-     */
-    public long getTookInMillis() {
-        return tookInMillis;
-    }
-
-    /**
-     * @return The queries that match with the document being percolated. This can return null if the request was set
-     * to only count the matches.
-     */
-    public Match[] getMatches() {
-        return this.matches;
-    }
-
-    /**
-     * @return The total number of queries that have matched with the document being percolated.
-     */
-    public long getCount() {
-        return count;
-    }
-
-    /**
-     * @return Any aggregations that have been executed on the query metadata. This can return null.
- */ - public InternalAggregations getAggregations() { - return aggregations; - } - - @Override - public Iterator iterator() { - return Arrays.asList(matches).iterator(); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - innerToXContent(builder, params); - builder.endObject(); - return builder; - } - - public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(Fields.TOOK, tookInMillis); - RestActions.buildBroadcastShardsHeader(builder, params, this); - - builder.field(Fields.TOTAL, count); - if (matches != null) { - builder.startArray(Fields.MATCHES); - boolean justIds = "ids".equals(params.param("percolate_format")); - if (justIds) { - for (PercolateResponse.Match match : matches) { - builder.value(match.getId()); - } - } else { - for (PercolateResponse.Match match : matches) { - builder.startObject(); - builder.field(Fields._INDEX, match.getIndex()); - builder.field(Fields._ID, match.getId()); - float score = match.getScore(); - if (score != NO_SCORE) { - builder.field(Fields._SCORE, match.getScore()); - } - if (match.getHighlightFields().isEmpty() == false) { - builder.startObject(Fields.HIGHLIGHT); - for (HighlightField field : match.getHighlightFields().values()) { - field.toXContent(builder, params); - } - builder.endObject(); - } - builder.endObject(); - } - } - builder.endArray(); - } - if (aggregations != null) { - aggregations.toXContent(builder, params); - } - return builder; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - tookInMillis = in.readVLong(); - count = in.readVLong(); - int size = in.readVInt(); - if (size != -1) { - matches = new Match[size]; - for (int i = 0; i < size; i++) { - matches[i] = new Match(); - matches[i].readFrom(in); - } - } - aggregations = InternalAggregations.readOptionalAggregations(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVLong(tookInMillis); - out.writeVLong(count); - if (matches == null) { - out.writeVInt(-1); - } else { - out.writeVInt(matches.length); - for (Match match : matches) { - match.writeTo(out); - } - } - out.writeOptionalStreamable(aggregations); - } - - /** - * Represents a query that has matched with the document that was percolated. - */ - public static class Match implements Streamable { - - private Text index; - private Text id; - private float score; - private Map hl; - - /** - * Constructor only for internal usage. - */ - public Match(Text index, Text id, float score, Map hl) { - this.id = id; - this.score = score; - this.index = index; - this.hl = hl; - } - - /** - * Constructor only for internal usage. - */ - public Match(Text index, Text id, float score) { - this.id = id; - this.score = score; - this.index = index; - } - - Match() { - } - - /** - * @return The index that the matched percolator query resides in. - */ - public Text getIndex() { - return index; - } - - /** - * @return The id of the matched percolator query. - */ - public Text getId() { - return id; - } - - /** - * @return If in the percolate request a query was specified this returns the score representing how well that - * query matched on the metadata associated with the matching query otherwise {@link Float#NaN} is returned. 
- */ - public float getScore() { - return score; - } - - /** - * @return If highlighting was specified in the percolate request the this returns highlight snippets for each - * matching field in the document being percolated based on this query otherwise null is returned. - */ - @Nullable - public Map getHighlightFields() { - return hl; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - id = in.readText(); - index = in.readText(); - score = in.readFloat(); - int size = in.readVInt(); - if (size > 0) { - hl = new HashMap<>(size); - for (int j = 0; j < size; j++) { - hl.put(in.readString(), HighlightField.readHighlightField(in)); - } - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeText(id); - out.writeText(index); - out.writeFloat(score); - if (hl != null) { - out.writeVInt(hl.size()); - for (Map.Entry entry : hl.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } - } else { - out.writeVInt(0); - } - } - } - - static final class Fields { - static final String TOOK = "took"; - static final String TOTAL = "total"; - static final String MATCHES = "matches"; - static final String _INDEX = "_index"; - static final String _ID = "_id"; - static final String _SCORE = "_score"; - static final String HIGHLIGHT = "highlight"; - } - -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateSourceBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateSourceBuilder.java deleted file mode 100644 index 4019f91300c..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateSourceBuilder.java +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.percolator; - -import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.action.support.ToXContentToBytes; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.elasticsearch.search.sort.ScoreSortBuilder; -import org.elasticsearch.search.sort.SortBuilder; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * Builder to create the percolate request body. - * - * @deprecated Instead use search API with {@link PercolateQueryBuilder} - */ -@Deprecated -public class PercolateSourceBuilder extends ToXContentToBytes { - - private DocBuilder docBuilder; - private QueryBuilder queryBuilder; - private Integer size; - private List> sorts; - private Boolean trackScores; - private HighlightBuilder highlightBuilder; - private List aggregationBuilders; - private List pipelineAggregationBuilders; - - /** - * Sets the document to run the percolate queries against. - */ - public PercolateSourceBuilder setDoc(DocBuilder docBuilder) { - this.docBuilder = docBuilder; - return this; - } - - /** - * Sets a query to reduce the number of percolate queries to be evaluated and score the queries that match based - * on this query. - */ - public PercolateSourceBuilder setQueryBuilder(QueryBuilder queryBuilder) { - this.queryBuilder = queryBuilder; - return this; - } - - /** - * Limits the maximum number of percolate query matches to be returned. - */ - public PercolateSourceBuilder setSize(int size) { - this.size = size; - return this; - } - - /** - * Similar as {@link #setTrackScores(boolean)}, but whether to sort by the score descending. - */ - public PercolateSourceBuilder setSort(boolean sort) { - if (sort) { - addSort(new ScoreSortBuilder()); - } else { - this.sorts = null; - } - return this; - } - - /** - * Adds a sort builder. Only sorting by score desc is supported. - * - * By default the matching percolator queries are returned in an undefined order. - */ - public PercolateSourceBuilder addSort(SortBuilder sort) { - if (sorts == null) { - sorts = new ArrayList<>(); - } - sorts.add(sort); - return this; - } - - /** - * Whether to compute a score for each match and include it in the response. The score is based on - * {@link #setQueryBuilder(QueryBuilder)}. - */ - public PercolateSourceBuilder setTrackScores(boolean trackScores) { - this.trackScores = trackScores; - return this; - } - - /** - * Enables highlighting for the percolate document. Per matched percolate query highlight the percolate document. - */ - public PercolateSourceBuilder setHighlightBuilder(HighlightBuilder highlightBuilder) { - this.highlightBuilder = highlightBuilder; - return this; - } - - /** - * Add an aggregation definition. 
- */ - public PercolateSourceBuilder addAggregation(AggregationBuilder aggregationBuilder) { - if (aggregationBuilders == null) { - aggregationBuilders = new ArrayList<>(); - } - aggregationBuilders.add(aggregationBuilder); - return this; - } - - /** - * Add an aggregation definition. - */ - public PercolateSourceBuilder addAggregation(PipelineAggregationBuilder aggregationBuilder) { - if (pipelineAggregationBuilders == null) { - pipelineAggregationBuilders = new ArrayList<>(); - } - pipelineAggregationBuilders.add(aggregationBuilder); - return this; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (docBuilder != null) { - docBuilder.toXContent(builder, params); - } - if (queryBuilder != null) { - builder.field("query"); - queryBuilder.toXContent(builder, params); - } - if (size != null) { - builder.field("size", size); - } - if (sorts != null) { - builder.startArray("sort"); - for (SortBuilder sort : sorts) { - sort.toXContent(builder, params); - } - builder.endArray(); - } - if (trackScores != null) { - builder.field("track_scores", trackScores); - } - if (highlightBuilder != null) { - builder.field(SearchSourceBuilder.HIGHLIGHT_FIELD.getPreferredName(), highlightBuilder); - } - if (aggregationBuilders != null || pipelineAggregationBuilders != null) { - builder.field("aggregations"); - builder.startObject(); - if (aggregationBuilders != null) { - for (AggregationBuilder aggregation : aggregationBuilders) { - aggregation.toXContent(builder, params); - } - } - if (pipelineAggregationBuilders != null) { - for (PipelineAggregationBuilder aggregation : pipelineAggregationBuilders) { - aggregation.toXContent(builder, params); - } - } - builder.endObject(); - } - builder.endObject(); - return builder; - } - - /** - * @return A new {@link DocBuilder} instance. - */ - public static DocBuilder docBuilder() { - return new DocBuilder(); - } - - /** - * A builder for defining the document to be percolated in various ways. - */ - public static class DocBuilder implements ToXContent { - - private BytesReference doc; - - /** - * Sets the document to be percolated. - */ - public DocBuilder setDoc(BytesReference doc) { - this.doc = doc; - return this; - } - - /** - * Sets the document to be percolated. - */ - public DocBuilder setDoc(String field, Object value) { - Map values = new HashMap<>(2); - values.put(field, value); - setDoc(values); - return this; - } - - /** - * Sets the document to be percolated. - */ - public DocBuilder setDoc(String doc) { - this.doc = new BytesArray(doc); - return this; - } - - /** - * Sets the document to be percolated. - */ - public DocBuilder setDoc(XContentBuilder doc) { - this.doc = doc.bytes(); - return this; - } - - /** - * Sets the document to be percolated. 
- */ - public DocBuilder setDoc(Map doc) { - return setDoc(doc, Requests.CONTENT_TYPE); - } - - @SuppressWarnings("unchecked") - public DocBuilder setDoc(Map doc, XContentType contentType) { - try { - return setDoc(XContentFactory.contentBuilder(contentType).map(doc)); - } catch (IOException e) { - throw new ElasticsearchGenerationException("Failed to generate [" + doc + "]", e); - } - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.rawField("doc", doc); - } - } - -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java index d314de3b05f..d09599a7af4 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java @@ -19,26 +19,21 @@ package org.elasticsearch.percolator; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.rest.RestHandler; import org.elasticsearch.search.fetch.FetchSubPhase; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; import static java.util.Collections.singletonList; -public class PercolatorPlugin extends Plugin implements MapperPlugin, ActionPlugin, SearchPlugin { +public class PercolatorPlugin extends Plugin implements MapperPlugin, SearchPlugin { private final Settings settings; @@ -46,17 +41,6 @@ public class PercolatorPlugin extends Plugin implements MapperPlugin, ActionPlug this.settings = settings; } - @Override - public List> getActions() { - return Arrays.asList(new ActionHandler<>(PercolateAction.INSTANCE, TransportPercolateAction.class), - new ActionHandler<>(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class)); - } - - @Override - public List> getRestHandlers() { - return Arrays.asList(RestPercolateAction.class, RestMultiPercolateAction.class); - } - @Override public List> getQueries() { return singletonList(new QuerySpec<>(PercolateQueryBuilder.NAME, PercolateQueryBuilder::new, PercolateQueryBuilder::fromXContent)); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java deleted file mode 100644 index 193e1ec6e6d..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.percolator; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; - -import java.io.IOException; - -import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestRequest.Method.POST; - -@Deprecated -public class RestMultiPercolateAction extends BaseRestHandler { - - private final boolean allowExplicitIndex; - - @Inject - public RestMultiPercolateAction(Settings settings, RestController controller) { - super(settings); - controller.registerHandler(POST, "/_mpercolate", this); - controller.registerHandler(POST, "/{index}/_mpercolate", this); - controller.registerHandler(POST, "/{index}/{type}/_mpercolate", this); - - controller.registerHandler(GET, "/_mpercolate", this); - controller.registerHandler(GET, "/{index}/_mpercolate", this); - controller.registerHandler(GET, "/{index}/{type}/_mpercolate", this); - - this.allowExplicitIndex = MULTI_ALLOW_EXPLICIT_INDEX.get(settings); - } - - @Override - public RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) throws IOException { - MultiPercolateRequest multiPercolateRequest = new MultiPercolateRequest(); - multiPercolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, multiPercolateRequest.indicesOptions())); - multiPercolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("index"))); - multiPercolateRequest.documentType(restRequest.param("type")); - multiPercolateRequest.add(restRequest.contentOrSourceParam(), allowExplicitIndex); - return channel -> client.execute(MultiPercolateAction.INSTANCE, multiPercolateRequest, new RestToXContentListener<>(channel)); - } - -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java deleted file mode 100644 index 55c17c8715a..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.percolator; - -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestActions; -import org.elasticsearch.rest.action.RestToXContentListener; - -import java.io.IOException; - -import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestRequest.Method.POST; - -@Deprecated -public class RestPercolateAction extends BaseRestHandler { - - @Inject - public RestPercolateAction(Settings settings, RestController controller) { - super(settings); - controller.registerHandler(GET, "/{index}/{type}/_percolate", this); - controller.registerHandler(POST, "/{index}/{type}/_percolate", this); - - RestPercolateExistingDocHandler existingDocHandler = new RestPercolateExistingDocHandler(settings); - controller.registerHandler(GET, "/{index}/{type}/{id}/_percolate", existingDocHandler); - controller.registerHandler(POST, "/{index}/{type}/{id}/_percolate", existingDocHandler); - - RestCountPercolateDocHandler countHandler = new RestCountPercolateDocHandler(settings); - controller.registerHandler(GET, "/{index}/{type}/_percolate/count", countHandler); - controller.registerHandler(POST, "/{index}/{type}/_percolate/count", countHandler); - - RestCountPercolateExistingDocHandler countExistingDocHandler = new RestCountPercolateExistingDocHandler(settings); - controller.registerHandler(GET, "/{index}/{type}/{id}/_percolate/count", countExistingDocHandler); - controller.registerHandler(POST, "/{index}/{type}/{id}/_percolate/count", countExistingDocHandler); - } - - private RestChannelConsumer parseDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, NodeClient client) { - percolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("index"))); - percolateRequest.documentType(restRequest.param("type")); - percolateRequest.routing(restRequest.param("routing")); - percolateRequest.preference(restRequest.param("preference")); - percolateRequest.source(restRequest.contentOrSourceParam()); - - percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions())); - return channel -> executePercolate(client, percolateRequest, channel); - } - - private RestChannelConsumer parseExistingDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, NodeClient client) { - String index = restRequest.param("index"); - String type = restRequest.param("type"); - percolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("percolate_index", index))); - percolateRequest.documentType(restRequest.param("percolate_type", type)); - - 
GetRequest getRequest = new GetRequest(index, type, - restRequest.param("id")); - getRequest.routing(restRequest.param("routing")); - getRequest.preference(restRequest.param("preference")); - getRequest.refresh(restRequest.paramAsBoolean("refresh", getRequest.refresh())); - getRequest.realtime(restRequest.paramAsBoolean("realtime", getRequest.realtime())); - getRequest.version(RestActions.parseVersion(restRequest)); - getRequest.versionType(VersionType.fromString(restRequest.param("version_type"), getRequest.versionType())); - - percolateRequest.getRequest(getRequest); - percolateRequest.routing(restRequest.param("percolate_routing")); - percolateRequest.preference(restRequest.param("percolate_preference")); - percolateRequest.source(restRequest.contentOrSourceParam()); - - percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions())); - return channel -> executePercolate(client, percolateRequest, channel); - } - - private void executePercolate(final NodeClient client, final PercolateRequest percolateRequest, final RestChannel restChannel) { - client.execute(PercolateAction.INSTANCE, percolateRequest, new RestToXContentListener<>(restChannel)); - } - - @Override - public RestChannelConsumer prepareRequest(RestRequest restRequest, final NodeClient client) throws IOException { - PercolateRequest percolateRequest = new PercolateRequest(); - return parseDocPercolate(percolateRequest, restRequest, client); - } - - private final class RestCountPercolateDocHandler extends BaseRestHandler { - - private RestCountPercolateDocHandler(Settings settings) { - super(settings); - } - - @Override - public RestChannelConsumer prepareRequest(RestRequest restRequest, final NodeClient client) throws IOException { - PercolateRequest percolateRequest = new PercolateRequest(); - percolateRequest.onlyCount(true); - return parseDocPercolate(percolateRequest, restRequest, client); - } - - } - - private final class RestPercolateExistingDocHandler extends BaseRestHandler { - - RestPercolateExistingDocHandler(Settings settings) { - super(settings); - } - - @Override - public RestChannelConsumer prepareRequest(RestRequest restRequest, final NodeClient client) throws IOException { - PercolateRequest percolateRequest = new PercolateRequest(); - return parseExistingDocPercolate(percolateRequest, restRequest, client); - } - - } - - private final class RestCountPercolateExistingDocHandler extends BaseRestHandler { - - RestCountPercolateExistingDocHandler(Settings settings) { - super(settings); - } - - @Override - public RestChannelConsumer prepareRequest(RestRequest restRequest, final NodeClient client) throws IOException { - PercolateRequest percolateRequest = new PercolateRequest(); - percolateRequest.onlyCount(true); - return parseExistingDocPercolate(percolateRequest, restRequest, client); - } - - } - -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java deleted file mode 100644 index 26f61e752e6..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.percolator; - -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.MultiGetItemResponse; -import org.elasticsearch.action.get.MultiGetRequest; -import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.action.search.MultiSearchRequest; -import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.search.SearchRequestParsers; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -@Deprecated -public class TransportMultiPercolateAction extends HandledTransportAction { - - private final Client client; - private final SearchRequestParsers searchRequestParsers; - private final NamedXContentRegistry xContentRegistry; - - @Inject - public TransportMultiPercolateAction(Settings settings, ThreadPool threadPool, TransportService transportService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Client client, SearchRequestParsers searchRequestParsers, NamedXContentRegistry xContentRegistry) { - super(settings, MultiPercolateAction.NAME, threadPool, transportService, actionFilters, - indexNameExpressionResolver, MultiPercolateRequest::new); - this.client = client; - this.searchRequestParsers = searchRequestParsers; - this.xContentRegistry = xContentRegistry; - } - - @Override - protected void doExecute(MultiPercolateRequest request, ActionListener listener) { - List> getRequests = new ArrayList<>(); - for (int i = 0; i < request.requests().size(); i++) { - GetRequest getRequest = request.requests().get(i).getRequest(); - if (getRequest != null) { - getRequests.add(new Tuple<>(i, getRequest)); - } - } - if (getRequests.isEmpty()) { - innerDoExecute(request, listener, Collections.emptyMap(), new HashMap<>()); - } else { - MultiGetRequest multiGetRequest = new MultiGetRequest(); - for (Tuple tuple : getRequests) { - GetRequest getRequest = tuple.v2(); - multiGetRequest.add(new MultiGetRequest.Item(getRequest.index(), getRequest.type(), getRequest.id())); - } - client.multiGet(multiGetRequest, new 
ActionListener() { - @Override - public void onResponse(MultiGetResponse response) { - Map getResponseSources = new HashMap<>(response.getResponses().length); - Map preFailures = new HashMap<>(); - for (int i = 0; i < response.getResponses().length; i++) { - MultiGetItemResponse itemResponse = response.getResponses()[i]; - int originalSlot = getRequests.get(i).v1(); - if (itemResponse.isFailed()) { - preFailures.put(originalSlot, new MultiPercolateResponse.Item(itemResponse.getFailure().getFailure())); - } else { - if (itemResponse.getResponse().isExists()) { - getResponseSources.put(originalSlot, itemResponse.getResponse().getSourceAsBytesRef()); - } else { - GetRequest getRequest = getRequests.get(i).v2(); - preFailures.put(originalSlot, new MultiPercolateResponse.Item(new ResourceNotFoundException("percolate document [{}/{}/{}] doesn't exist", getRequest.index(), getRequest.type(), getRequest.id()))); - } - } - } - innerDoExecute(request, listener, getResponseSources, preFailures); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); - } - } - - private void innerDoExecute(MultiPercolateRequest request, ActionListener listener, Map getResponseSources, Map preFailures) { - try { - MultiSearchRequest multiSearchRequest = createMultiSearchRequest(request, getResponseSources, preFailures); - if (multiSearchRequest.requests().isEmpty()) { - // we may failed to turn all percolate requests into search requests, - // in that case just return the response... - listener.onResponse( - createMultiPercolateResponse(new MultiSearchResponse(new MultiSearchResponse.Item[0]), request, preFailures) - ); - } else { - client.multiSearch(multiSearchRequest, new ActionListener() { - @Override - public void onResponse(MultiSearchResponse response) { - try { - listener.onResponse(createMultiPercolateResponse(response, request, preFailures)); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); - } - } catch (Exception e) { - listener.onFailure(e); - } - } - - private MultiSearchRequest createMultiSearchRequest(MultiPercolateRequest multiPercolateRequest, Map getResponseSources, Map preFailures) throws IOException { - MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); - multiSearchRequest.indicesOptions(multiPercolateRequest.indicesOptions()); - - for (int i = 0; i < multiPercolateRequest.requests().size(); i++) { - if (preFailures.keySet().contains(i)) { - continue; - } - - PercolateRequest percolateRequest = multiPercolateRequest.requests().get(i); - BytesReference docSource = getResponseSources.get(i); - try { - SearchRequest searchRequest = TransportPercolateAction.createSearchRequest(percolateRequest, docSource, xContentRegistry, - parseFieldMatcher); - multiSearchRequest.add(searchRequest); - } catch (Exception e) { - preFailures.put(i, new MultiPercolateResponse.Item(e)); - } - } - - return multiSearchRequest; - } - - private MultiPercolateResponse createMultiPercolateResponse(MultiSearchResponse multiSearchResponse, MultiPercolateRequest request, Map preFailures) { - int searchResponseIndex = 0; - MultiPercolateResponse.Item[] percolateItems = new MultiPercolateResponse.Item[request.requests().size()]; - for (int i = 0; i < percolateItems.length; i++) { - if (preFailures.keySet().contains(i)) { - percolateItems[i] = preFailures.get(i); - } else { - MultiSearchResponse.Item searchItem = multiSearchResponse.getResponses()[searchResponseIndex++]; - if 
(searchItem.isFailure()) { - percolateItems[i] = new MultiPercolateResponse.Item(searchItem.getFailure()); - } else { - PercolateRequest percolateRequest = request.requests().get(i); - percolateItems[i] = new MultiPercolateResponse.Item(TransportPercolateAction.createPercolateResponse(searchItem.getResponse(), percolateRequest.onlyCount())); - } - } - } - return new MultiPercolateResponse(percolateItems); - } - -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportPercolateAction.java deleted file mode 100644 index 30bf1d0808a..00000000000 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportPercolateAction.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.percolator; - -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ShardOperationFailedException; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.ConstantScoreQueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.SearchRequestParsers; -import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.threadpool.ThreadPool; -import 
org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -@Deprecated -public class TransportPercolateAction extends HandledTransportAction { - - private final Client client; - private final SearchRequestParsers searchRequestParsers; - private final NamedXContentRegistry xContentRegistry; - - @Inject - public TransportPercolateAction(Settings settings, ThreadPool threadPool, TransportService transportService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Client client, SearchRequestParsers searchRequestParsers, NamedXContentRegistry xContentRegistry) { - super(settings, PercolateAction.NAME, threadPool, transportService, actionFilters, - indexNameExpressionResolver, PercolateRequest::new); - this.client = client; - this.searchRequestParsers = searchRequestParsers; - this.xContentRegistry = xContentRegistry; - } - - @Override - protected void doExecute(PercolateRequest request, ActionListener listener) { - if (request.getRequest() != null) { - client.get(request.getRequest(), new ActionListener() { - @Override - public void onResponse(GetResponse getResponse) { - if (getResponse.isExists()) { - innerDoExecute(request, getResponse.getSourceAsBytesRef(), listener); - } else { - onFailure(new ResourceNotFoundException("percolate document [{}/{}/{}] doesn't exist", - request.getRequest().index(), request.getRequest().type(), request.getRequest().id())); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); - } else { - innerDoExecute(request, null, listener); - } - } - - private void innerDoExecute(PercolateRequest request, BytesReference docSource, ActionListener listener) { - SearchRequest searchRequest; - try { - searchRequest = createSearchRequest(request, docSource, xContentRegistry, parseFieldMatcher); - } catch (IOException e) { - listener.onFailure(e); - return; - } - client.search(searchRequest, new ActionListener() { - @Override - public void onResponse(SearchResponse searchResponse) { - try { - listener.onResponse(createPercolateResponse(searchResponse, request.onlyCount())); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); - } - - public static SearchRequest createSearchRequest(PercolateRequest percolateRequest, BytesReference documentSource, - NamedXContentRegistry xContentRegistry, - ParseFieldMatcher parseFieldMatcher) - throws IOException { - SearchRequest searchRequest = new SearchRequest(); - if (percolateRequest.indices() != null) { - searchRequest.indices(percolateRequest.indices()); - } - searchRequest.indicesOptions(percolateRequest.indicesOptions()); - searchRequest.routing(percolateRequest.routing()); - searchRequest.preference(percolateRequest.preference()); - - BytesReference querySource = null; - XContentBuilder searchSource = XContentFactory.jsonBuilder().startObject(); - if (percolateRequest.source() != null && percolateRequest.source().length() > 0) { - try (XContentParser parser = XContentHelper.createParser(xContentRegistry, percolateRequest.source())) { - String currentFieldName = null; - XContentParser.Token token = parser.nextToken(); - if (token != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException("Unknown token [" + token+ "]"); - } - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = 
parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if ("doc".equals(currentFieldName)) { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.copyCurrentStructure(parser); - builder.flush(); - documentSource = builder.bytes(); - } else if ("query".equals(currentFieldName) || "filter".equals(currentFieldName)) { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.copyCurrentStructure(parser); - builder.flush(); - querySource = builder.bytes(); - } else if ("sort".equals(currentFieldName)) { - searchSource.field("sort"); - searchSource.copyCurrentStructure(parser); - } else if ("aggregations".equals(currentFieldName)) { - searchSource.field("aggregations"); - searchSource.copyCurrentStructure(parser); - } else if ("highlight".equals(currentFieldName)) { - searchSource.field("highlight"); - searchSource.copyCurrentStructure(parser); - } else { - throw new IllegalArgumentException("Unknown field [" + currentFieldName+ "]"); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("sort".equals(currentFieldName)) { - searchSource.field("sort"); - searchSource.copyCurrentStructure(parser); - } else { - throw new IllegalArgumentException("Unknown field [" + currentFieldName+ "]"); - } - } else if (token.isValue()) { - if ("size".equals(currentFieldName)) { - if (percolateRequest.onlyCount()) { - throw new IllegalArgumentException("Cannot set size if onlyCount == true"); - } - searchSource.field("size", parser.intValue()); - } else if ("sort".equals(currentFieldName)) { - searchSource.field("sort", parser.text()); - } else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) { - searchSource.field("track_scores", parser.booleanValue()); - } else { - throw new IllegalArgumentException("Unknown field [" + currentFieldName+ "]"); - } - } else { - throw new IllegalArgumentException("Unknown token [" + token + "]"); - } - } - } - } - - if (percolateRequest.onlyCount()) { - searchSource.field("size", 0); - } - - PercolateQueryBuilder percolateQueryBuilder = - new PercolateQueryBuilder("query", percolateRequest.documentType(), documentSource); - if (querySource != null) { - try (XContentParser parser = XContentHelper.createParser(xContentRegistry, querySource)) { - QueryParseContext queryParseContext = new QueryParseContext(parser, parseFieldMatcher); - BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery(); - boolQueryBuilder.must(queryParseContext.parseInnerQueryBuilder()); - boolQueryBuilder.filter(percolateQueryBuilder); - searchSource.field("query", boolQueryBuilder); - } - } else { - // wrapping in a constant score query with boost 0 for bwc reason. 
- // percolator api didn't emit scores before and never included scores - // for how well percolator queries matched with the document being percolated - searchSource.field("query", new ConstantScoreQueryBuilder(percolateQueryBuilder).boost(0f)); - } - - searchSource.endObject(); - searchSource.flush(); - BytesReference source = searchSource.bytes(); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(xContentRegistry, source)) { - QueryParseContext context = new QueryParseContext(parser, parseFieldMatcher); - searchSourceBuilder.parseXContent(context); - searchRequest.source(searchSourceBuilder); - return searchRequest; - } - } - - public static PercolateResponse createPercolateResponse(SearchResponse searchResponse, boolean onlyCount) { - SearchHits hits = searchResponse.getHits(); - PercolateResponse.Match[] matches; - if (onlyCount) { - matches = null; - } else { - matches = new PercolateResponse.Match[hits.getHits().length]; - for (int i = 0; i < hits.getHits().length; i++) { - SearchHit hit = hits.getHits()[i]; - matches[i] = new PercolateResponse.Match(new Text(hit.getIndex()), - new Text(hit.getId()), hit.getScore(), hit.getHighlightFields()); - } - } - - List shardFailures = new ArrayList<>(searchResponse.getShardFailures().length); - for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) { - shardFailures.add(new DefaultShardOperationFailedException(shardSearchFailure.index(), shardSearchFailure.shardId(), - shardSearchFailure.getCause())); - } - - return new PercolateResponse( - searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getFailedShards(), shardFailures, - matches, hits.getTotalHits(), searchResponse.getTookInMillis(), (InternalAggregations) searchResponse.getAggregations() - ); - } - -} diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java deleted file mode 100644 index 0a359376f7b..00000000000 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java +++ /dev/null @@ -1,414 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.percolator; - -import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.query.Operator; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; - -import static org.elasticsearch.percolator.PercolateSourceBuilder.docBuilder; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; -import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; -import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.percolator.PercolatorTestUtil.assertMatchCount; -import static org.elasticsearch.percolator.PercolatorTestUtil.preparePercolate; -import static org.elasticsearch.percolator.PercolatorTestUtil.prepareMultiPercolate; -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.arrayWithSize; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class MultiPercolatorIT extends ESIntegTestCase { - - private static final String INDEX_NAME = "queries"; - private static final String TYPE_NAME = "query"; - - @Override - protected Collection> nodePlugins() { - return Collections.singleton(PercolatorPlugin.class); - } - - @Override - protected Collection> transportClientPlugins() { - return Collections.singleton(PercolatorPlugin.class); - } - - public void testBasics() throws Exception { - assertAcked(prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("type", "field1", "type=text")); - ensureGreen(); - - logger.info("--> register a queries"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "3") - .setSource(jsonBuilder().startObject().field("query", boolQuery() - .must(matchQuery("field1", "b")) - .must(matchQuery("field1", "c")) - ).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "4") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - refresh(); - - MultiPercolateResponse response = prepareMultiPercolate(client()) - .add(preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))) - 
.add(preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject()))) - .add(preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject()))) - .add(preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject()))) - .add(preparePercolate(client()) // non existing doc, so error element - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("5"))) - .execute().actionGet(); - - MultiPercolateResponse.Item item = response.getItems()[0]; - assertMatchCount(item.getResponse(), 2L); - assertThat(item.getResponse().getMatches(), arrayWithSize(2)); - assertThat(item.getErrorMessage(), nullValue()); - assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "4")); - - item = response.getItems()[1]; - assertThat(item.getErrorMessage(), nullValue()); - - assertMatchCount(item.getResponse(), 2L); - assertThat(item.getResponse().getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("2", "4")); - - item = response.getItems()[2]; - assertThat(item.getErrorMessage(), nullValue()); - assertMatchCount(item.getResponse(), 4L); - assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4")); - - item = response.getItems()[3]; - assertThat(item.getErrorMessage(), nullValue()); - assertMatchCount(item.getResponse(), 1L); - assertThat(item.getResponse().getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContaining("4")); - - item = response.getItems()[4]; - assertThat(item.getResponse(), nullValue()); - assertThat(item.getErrorMessage(), notNullValue()); - assertThat(item.getErrorMessage(), containsString("[" + INDEX_NAME + "/type/5] doesn't exist")); - } - - public void testWithRouting() throws Exception { - assertAcked(prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("type", "field1", "type=text")); - ensureGreen(); - - logger.info("--> register a queries"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setRouting("a") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setRouting("a") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "3") - .setRouting("a") - .setSource(jsonBuilder().startObject().field("query", boolQuery() - .must(matchQuery("field1", "b")) - .must(matchQuery("field1", "c")) - ).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "4") - .setRouting("a") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - refresh(); - - MultiPercolateResponse response = prepareMultiPercolate(client()) - .add(preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setRouting("a") - 
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))) - .add(preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setRouting("a") - .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject()))) - .add(preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setRouting("a") - .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject()))) - .add(preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setRouting("a") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject()))) - .add(preparePercolate(client()) // non existing doc, so error element - .setIndices(INDEX_NAME).setDocumentType("type") - .setRouting("a") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("5"))) - .execute().actionGet(); - - MultiPercolateResponse.Item item = response.getItems()[0]; - assertMatchCount(item.getResponse(), 2L); - assertThat(item.getResponse().getMatches(), arrayWithSize(2)); - assertThat(item.getErrorMessage(), nullValue()); - assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "4")); - - item = response.getItems()[1]; - assertThat(item.getErrorMessage(), nullValue()); - - assertMatchCount(item.getResponse(), 2L); - assertThat(item.getResponse().getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("2", "4")); - - item = response.getItems()[2]; - assertThat(item.getErrorMessage(), nullValue()); - assertMatchCount(item.getResponse(), 4L); - assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4")); - - item = response.getItems()[3]; - assertThat(item.getErrorMessage(), nullValue()); - assertMatchCount(item.getResponse(), 1L); - assertThat(item.getResponse().getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContaining("4")); - - item = response.getItems()[4]; - assertThat(item.getResponse(), nullValue()); - assertThat(item.getErrorMessage(), notNullValue()); - assertThat(item.getErrorMessage(), containsString("[" + INDEX_NAME + "/type/5] doesn't exist")); - } - - public void testExistingDocsOnly() throws Exception { - prepareCreate(INDEX_NAME).addMapping(TYPE_NAME, "query", "type=percolator").get(); - - int numQueries = randomIntBetween(50, 100); - logger.info("--> register a queries"); - for (int i = 0; i < numQueries; i++) { - client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - } - - client().prepareIndex(INDEX_NAME, "type", "1") - .setSource(jsonBuilder().startObject().field("field", "a").endObject()) - .execute().actionGet(); - refresh(); - - MultiPercolateRequestBuilder builder = prepareMultiPercolate(client()); - int numPercolateRequest = randomIntBetween(50, 100); - for (int i = 0; i < numPercolateRequest; i++) { - builder.add( - preparePercolate(client()) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("1")) - .setIndices(INDEX_NAME).setDocumentType("type") - .setSize(numQueries) - ); - } - - MultiPercolateResponse response = builder.execute().actionGet(); - assertThat(response.items().length, equalTo(numPercolateRequest)); - for 
(MultiPercolateResponse.Item item : response) { - assertThat(item.isFailure(), equalTo(false)); - assertMatchCount(item.getResponse(), numQueries); - assertThat(item.getResponse().getMatches().length, equalTo(numQueries)); - } - - // Non existing doc - builder = prepareMultiPercolate(client()); - for (int i = 0; i < numPercolateRequest; i++) { - builder.add( - preparePercolate(client()) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("2")) - .setIndices(INDEX_NAME).setDocumentType("type").setSize(numQueries) - - ); - } - - response = builder.execute().actionGet(); - assertThat(response.items().length, equalTo(numPercolateRequest)); - for (MultiPercolateResponse.Item item : response) { - assertThat(item.isFailure(), equalTo(true)); - assertThat(item.getErrorMessage(), containsString("doesn't exist")); - assertThat(item.getResponse(), nullValue()); - } - - // One existing doc - builder = prepareMultiPercolate(client()); - for (int i = 0; i < numPercolateRequest; i++) { - builder.add( - preparePercolate(client()) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("2")) - .setIndices(INDEX_NAME).setDocumentType("type").setSize(numQueries) - ); - } - builder.add( - preparePercolate(client()) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("1")) - .setIndices(INDEX_NAME).setDocumentType("type").setSize(numQueries) - ); - - response = builder.execute().actionGet(); - assertThat(response.items().length, equalTo(numPercolateRequest + 1)); - assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false)); - assertMatchCount(response.items()[numPercolateRequest].getResponse(), numQueries); - assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries)); - } - - public void testWithDocsOnly() throws Exception { - prepareCreate(INDEX_NAME).addMapping(TYPE_NAME, "query", "type=percolator").get(); - ensureGreen(); - - int numQueries = randomIntBetween(50, 100); - logger.info("--> register a queries"); - for (int i = 0; i < numQueries; i++) { - client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - } - refresh(); - - MultiPercolateRequestBuilder builder = prepareMultiPercolate(client()); - int numPercolateRequest = randomIntBetween(50, 100); - for (int i = 0; i < numPercolateRequest; i++) { - builder.add( - preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setSize(numQueries) - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject()))); - } - - MultiPercolateResponse response = builder.execute().actionGet(); - assertThat(response.items().length, equalTo(numPercolateRequest)); - for (MultiPercolateResponse.Item item : response) { - assertThat(item.isFailure(), equalTo(false)); - assertMatchCount(item.getResponse(), numQueries); - assertThat(item.getResponse().getMatches().length, equalTo(numQueries)); - } - - // All illegal json - builder = prepareMultiPercolate(client()); - for (int i = 0; i < numPercolateRequest; i++) { - builder.add( - preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setSource("illegal json")); - } - - response = builder.execute().actionGet(); - assertThat(response.items().length, equalTo(numPercolateRequest)); - for (MultiPercolateResponse.Item item : response) { - assertThat(item.isFailure(), equalTo(true)); - assertThat(item.getFailure(), notNullValue()); - 
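                // A hedged sketch (imports elided; not part of the removed test): with the
                // multi percolate API gone, the equivalent of prepareMultiPercolate() is a
                // multi search in which every item is a regular search carrying a
                // `percolate` query:
                //
                //   MultiSearchResponse msearch = client().prepareMultiSearch()
                //           .add(client().prepareSearch(INDEX_NAME).setQuery(new PercolateQueryBuilder(
                //                   "query", "type", jsonBuilder().startObject().field("field", "a").endObject().bytes())))
                //           .add(client().prepareSearch(INDEX_NAME).setQuery(new PercolateQueryBuilder(
                //                   "query", "type", jsonBuilder().startObject().field("field", "b").endObject().bytes())))
                //           .get();
                //   for (MultiSearchResponse.Item msearchItem : msearch.getResponses()) {
                //       // isFailure()/getResponse() here play the role of MultiPercolateResponse.Item
                //   }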
} - - // one valid request - builder = prepareMultiPercolate(client()); - for (int i = 0; i < numPercolateRequest; i++) { - builder.add( - preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setSource("illegal json")); - } - builder.add( - preparePercolate(client()) - .setSize(numQueries) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject()))); - - response = builder.execute().actionGet(); - assertThat(response.items().length, equalTo(numPercolateRequest + 1)); - assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false)); - assertMatchCount(response.items()[numPercolateRequest].getResponse(), numQueries); - assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries)); - } - - public void testNestedMultiPercolation() throws IOException { - initNestedIndexAndPercolation(); - MultiPercolateRequestBuilder mpercolate= prepareMultiPercolate(client()); - mpercolate.add(preparePercolate(client()).setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getNotMatchingNestedDoc())).setIndices(INDEX_NAME).setDocumentType("company")); - mpercolate.add(preparePercolate(client()).setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getMatchingNestedDoc())).setIndices(INDEX_NAME).setDocumentType("company")); - MultiPercolateResponse response = mpercolate.get(); - assertEquals(response.getItems()[0].getResponse().getMatches().length, 0); - assertEquals(response.getItems()[1].getResponse().getMatches().length, 1); - assertEquals(response.getItems()[1].getResponse().getMatches()[0].getId().string(), "Q"); - } - - void initNestedIndexAndPercolation() throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject().startObject("properties").startObject("companyname").field("type", "text").endObject() - .startObject("employee").field("type", "nested").startObject("properties") - .startObject("name").field("type", "text").endObject().endObject().endObject().endObject() - .endObject(); - - assertAcked(client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("company", mapping)); - ensureGreen(INDEX_NAME); - - client().prepareIndex(INDEX_NAME, TYPE_NAME, "Q").setSource(jsonBuilder().startObject() - .field("query", QueryBuilders.nestedQuery("employee", QueryBuilders.matchQuery("employee.name", "virginia potts").operator(Operator.AND), ScoreMode.Avg)).endObject()).get(); - - refresh(); - - } - - XContentBuilder getMatchingNestedDoc() throws IOException { - XContentBuilder doc = XContentFactory.jsonBuilder(); - doc.startObject().field("companyname", "stark").startArray("employee") - .startObject().field("name", "virginia potts").endObject() - .startObject().field("name", "tony stark").endObject() - .endArray().endObject(); - return doc; - } - - XContentBuilder getNotMatchingNestedDoc() throws IOException { - XContentBuilder doc = XContentFactory.jsonBuilder(); - doc.startObject().field("companyname", "notstark").startArray("employee") - .startObject().field("name", "virginia stark").endObject() - .startObject().field("name", "tony potts").endObject() - .endArray().endObject(); - return doc; - } - -} diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/MultiPercolatorRequestTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/MultiPercolatorRequestTests.java deleted file 
mode 100644 index 8e3eeebc622..00000000000 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/MultiPercolatorRequestTests.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.percolator; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.StreamsUtils; - -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class MultiPercolatorRequestTests extends ESTestCase { - public void testParseBulkRequests() throws Exception { - byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/percolator/mpercolate1.json"); - MultiPercolateRequest request = new MultiPercolateRequest().add(data, 0, data.length); - - assertThat(request.requests().size(), equalTo(8)); - PercolateRequest percolateRequest = request.requests().get(0); - assertThat(percolateRequest.indices()[0], equalTo("my-index1")); - assertThat(percolateRequest.documentType(), equalTo("my-type1")); - assertThat(percolateRequest.routing(), equalTo("my-routing-1")); - assertThat(percolateRequest.preference(), equalTo("_local")); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strictExpandOpenAndForbidClosed())); - assertThat(percolateRequest.onlyCount(), equalTo(false)); - assertThat(percolateRequest.getRequest(), nullValue()); - assertThat(percolateRequest.source(), notNullValue()); - Map sourceMap = createParser(JsonXContent.jsonXContent, percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value1").map())); - - percolateRequest = request.requests().get(1); - assertThat(percolateRequest.indices()[0], equalTo("my-index2")); - assertThat(percolateRequest.indices()[1], equalTo("my-index3")); - assertThat(percolateRequest.documentType(), equalTo("my-type1")); - assertThat(percolateRequest.routing(), equalTo("my-routing-1")); - assertThat(percolateRequest.preference(), equalTo("_local")); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed()))); - assertThat(percolateRequest.onlyCount(), equalTo(false)); - assertThat(percolateRequest.getRequest(), nullValue()); - assertThat(percolateRequest.source(), notNullValue()); - sourceMap = createParser(JsonXContent.jsonXContent, percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), 
equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value2").map())); - - percolateRequest = request.requests().get(2); - assertThat(percolateRequest.indices()[0], equalTo("my-index4")); - assertThat(percolateRequest.indices()[1], equalTo("my-index5")); - assertThat(percolateRequest.documentType(), equalTo("my-type1")); - assertThat(percolateRequest.routing(), equalTo("my-routing-1")); - assertThat(percolateRequest.preference(), equalTo("_local")); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); - assertThat(percolateRequest.onlyCount(), equalTo(true)); - assertThat(percolateRequest.getRequest(), nullValue()); - assertThat(percolateRequest.source(), notNullValue()); - sourceMap = createParser(JsonXContent.jsonXContent, percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value3").map())); - - percolateRequest = request.requests().get(3); - assertThat(percolateRequest.indices()[0], equalTo("my-index6")); - assertThat(percolateRequest.documentType(), equalTo("my-type1")); - assertThat(percolateRequest.routing(), equalTo("my-routing-1")); - assertThat(percolateRequest.preference(), equalTo("_local")); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); - assertThat(percolateRequest.onlyCount(), equalTo(false)); - assertThat(percolateRequest.getRequest(), notNullValue()); - assertThat(percolateRequest.getRequest().id(), equalTo("1")); - assertThat(percolateRequest.getRequest().type(), equalTo("my-type1")); - assertThat(percolateRequest.getRequest().index(), equalTo("my-index6")); - assertThat(percolateRequest.getRequest().routing(), equalTo("my-routing-1")); - assertThat(percolateRequest.getRequest().preference(), equalTo("_local")); - - percolateRequest = request.requests().get(4); - assertThat(percolateRequest.indices()[0], equalTo("my-index7")); - assertThat(percolateRequest.documentType(), equalTo("my-type1")); - assertThat(percolateRequest.routing(), equalTo("my-routing-1")); - assertThat(percolateRequest.preference(), equalTo("_local")); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strictExpandOpenAndForbidClosed())); - assertThat(percolateRequest.onlyCount(), equalTo(true)); - assertThat(percolateRequest.getRequest(), notNullValue()); - assertThat(percolateRequest.getRequest().id(), equalTo("2")); - assertThat(percolateRequest.getRequest().type(), equalTo("my-type1")); - assertThat(percolateRequest.getRequest().index(), equalTo("my-index7")); - assertThat(percolateRequest.getRequest().routing(), equalTo("my-routing-1")); - assertThat(percolateRequest.getRequest().preference(), equalTo("_local")); - - percolateRequest = request.requests().get(5); - assertThat(percolateRequest.indices()[0], equalTo("my-index8")); - assertThat(percolateRequest.documentType(), equalTo("my-type1")); - assertThat(percolateRequest.routing(), equalTo("my-routing-1")); - assertThat(percolateRequest.preference(), equalTo("primary")); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strictExpandOpenAndForbidClosed())); - assertThat(percolateRequest.onlyCount(), equalTo(false)); - assertThat(percolateRequest.getRequest(), nullValue()); - assertThat(percolateRequest.source(), notNullValue()); - sourceMap = createParser(JsonXContent.jsonXContent, 
percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value4").map())); - - percolateRequest = request.requests().get(6); - assertThat(percolateRequest.indices()[0], equalTo("percolate-index1")); - assertThat(percolateRequest.documentType(), equalTo("other-type")); - assertThat(percolateRequest.routing(), equalTo("percolate-routing-1")); - assertThat(percolateRequest.preference(), equalTo("_local")); - assertThat(percolateRequest.getRequest(), notNullValue()); - assertThat(percolateRequest.getRequest().indices()[0], equalTo("my-index9")); - assertThat(percolateRequest.getRequest().type(), equalTo("my-type1")); - assertThat(percolateRequest.getRequest().routing(), nullValue()); - assertThat(percolateRequest.getRequest().preference(), nullValue()); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strictExpandOpenAndForbidClosed())); - assertThat(percolateRequest.onlyCount(), equalTo(false)); - assertThat(percolateRequest.source(), notNullValue()); - sourceMap = createParser(JsonXContent.jsonXContent, percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), nullValue()); - - percolateRequest = request.requests().get(7); - assertThat(percolateRequest.indices()[0], equalTo("my-index10")); - assertThat(percolateRequest.documentType(), equalTo("my-type1")); - assertThat(percolateRequest.routing(), nullValue()); - assertThat(percolateRequest.preference(), nullValue()); - assertThat(percolateRequest.getRequest(), notNullValue()); - assertThat(percolateRequest.getRequest().indices()[0], equalTo("my-index10")); - assertThat(percolateRequest.getRequest().type(), equalTo("my-type1")); - assertThat(percolateRequest.getRequest().routing(), nullValue()); - assertThat(percolateRequest.getRequest().preference(), nullValue()); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, false, true, false, IndicesOptions.strictExpandOpenAndForbidClosed()))); - assertThat(percolateRequest.onlyCount(), equalTo(false)); - assertThat(percolateRequest.source(), notNullValue()); - sourceMap = createParser(JsonXContent.jsonXContent, percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), nullValue()); - } - - public void testParseBulkRequestsDefaults() throws Exception { - byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/percolator/mpercolate2.json"); - MultiPercolateRequest request = new MultiPercolateRequest(); - request.indices("my-index1").documentType("my-type1").indicesOptions(IndicesOptions.lenientExpandOpen()); - request.add(data, 0, data.length); - - assertThat(request.requests().size(), equalTo(3)); - PercolateRequest percolateRequest = request.requests().get(0); - assertThat(percolateRequest.indices()[0], equalTo("my-index1")); - assertThat(percolateRequest.documentType(), equalTo("my-type1")); - assertThat(percolateRequest.routing(), equalTo("my-routing-1")); - assertThat(percolateRequest.preference(), equalTo("_local")); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen())); - assertThat(percolateRequest.onlyCount(), equalTo(false)); - assertThat(percolateRequest.getRequest(), nullValue()); - assertThat(percolateRequest.source(), notNullValue()); - Map sourceMap = createParser(JsonXContent.jsonXContent, percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value1").map())); - - percolateRequest = 
request.requests().get(1); - assertThat(percolateRequest.indices()[0], equalTo("my-index1")); - assertThat(percolateRequest.documentType(), equalTo("my-type1")); - assertThat(percolateRequest.routing(), equalTo("my-routing-1")); - assertThat(percolateRequest.preference(), equalTo("_local")); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen())); - assertThat(percolateRequest.onlyCount(), equalTo(false)); - assertThat(percolateRequest.getRequest(), nullValue()); - assertThat(percolateRequest.source(), notNullValue()); - sourceMap = createParser(JsonXContent.jsonXContent, percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value2").map())); - - percolateRequest = request.requests().get(2); - assertThat(percolateRequest.indices()[0], equalTo("my-index1")); - assertThat(percolateRequest.documentType(), equalTo("my-type1")); - assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen())); - assertThat(percolateRequest.onlyCount(), equalTo(false)); - assertThat(percolateRequest.getRequest(), nullValue()); - assertThat(percolateRequest.source(), notNullValue()); - sourceMap = createParser(JsonXContent.jsonXContent, percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value3").map())); - } - -} diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java deleted file mode 100644 index 3577745380c..00000000000 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-package org.elasticsearch.percolator;
-
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.search.aggregations.Aggregation;
-import org.elasticsearch.search.aggregations.AggregationBuilders;
-import org.elasticsearch.search.aggregations.Aggregations;
-import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
-import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
-import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
-import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue;
-import org.elasticsearch.test.ESIntegTestCase;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-import static org.elasticsearch.percolator.PercolateSourceBuilder.docBuilder;
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
-import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.percolator.PercolatorTestUtil.assertMatchCount;
-import static org.elasticsearch.percolator.PercolatorTestUtil.preparePercolate;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
-import static org.hamcrest.Matchers.arrayWithSize;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.notNullValue;
-
-public class PercolatorAggregationsIT extends ESIntegTestCase {
-
-    private static final String INDEX_NAME = "queries";
-    private static final String TYPE_NAME = "query";
-
-    @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Collections.singleton(PercolatorPlugin.class);
-    }
-
-    @Override
-    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
-        return Collections.singleton(PercolatorPlugin.class);
-    }
-
-    // Just test the integration with facets and aggregations, not the facet and aggregation functionality!
- public void testAggregations() throws Exception { - assertAcked(prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("type", "field1", "type=text", "field2", "type=keyword")); - ensureGreen(); - - int numQueries = scaledRandomIntBetween(250, 500); - int numUniqueQueries = between(1, numQueries / 2); - String[] values = new String[numUniqueQueries]; - for (int i = 0; i < values.length; i++) { - values[i] = "value" + i; - } - int[] expectedCount = new int[numUniqueQueries]; - - logger.info("--> registering {} queries", numQueries); - for (int i = 0; i < numQueries; i++) { - String value = values[i % numUniqueQueries]; - expectedCount[i % numUniqueQueries]++; - QueryBuilder queryBuilder = matchQuery("field1", value); - client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute() - .actionGet(); - } - refresh(); - - for (int i = 0; i < numQueries; i++) { - String value = values[i % numUniqueQueries]; - PercolateRequestBuilder percolateRequestBuilder = preparePercolate(client()) - .setIndices(INDEX_NAME) - .setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); - - SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2").collectMode(aggCollectionMode)); - - if (randomBoolean()) { - percolateRequestBuilder.setPercolateQuery(matchAllQuery()); - } - - boolean countOnly = randomBoolean(); - if (countOnly) { - percolateRequestBuilder.setOnlyCount(countOnly); - } else { - // can only set size if we also keep track of matches (i.e. countOnly == false) - if (randomBoolean()) { - percolateRequestBuilder.setScore(true).setSize(expectedCount[i % numUniqueQueries]); - } else { - percolateRequestBuilder.setSortByScore(true).setSize(numQueries); - } - } - - PercolateResponse response = percolateRequestBuilder.execute().actionGet(); - assertMatchCount(response, expectedCount[i % numUniqueQueries]); - if (!countOnly) { - assertThat(response.getMatches(), arrayWithSize(expectedCount[i % numUniqueQueries])); - } - - List aggregations = response.getAggregations().asList(); - assertThat(aggregations.size(), equalTo(1)); - assertThat(aggregations.get(0).getName(), equalTo("a")); - List buckets = new ArrayList<>(((Terms) aggregations.get(0)).getBuckets()); - assertThat(buckets.size(), equalTo(1)); - assertThat(buckets.get(0).getKeyAsString(), equalTo("b")); - assertThat(buckets.get(0).getDocCount(), equalTo((long) expectedCount[i % values.length])); - } - } - - // Just test the integration with facets and aggregations, not the facet and aggregation functionality! 
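    // A hedged sketch (assuming the 5.x transport client; the helper name is
    // illustrative): after this removal, the replacement for these
    // percolate-with-aggregations tests is a regular search that pairs a `percolate`
    // query with the same aggregations.
    private void replacementAggregationSketch(org.elasticsearch.common.bytes.BytesReference document) {
        org.elasticsearch.action.search.SearchResponse response = client().prepareSearch(INDEX_NAME)
                .setQuery(new PercolateQueryBuilder("query", "type", document))
                .addAggregation(AggregationBuilders.terms("a").field("field2"))
                .addAggregation(PipelineAggregatorBuilders.maxBucket("max_a", "a>_count"))
                .get();
        // aggregations come back exactly as in any other search response
        Terms a = response.getAggregations().get("a");
        InternalBucketMetricValue maxA = response.getAggregations().get("max_a");
        assertThat(a, notNullValue());
        assertThat(maxA, notNullValue());
    }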
- public void testAggregationsAndPipelineAggregations() throws Exception { - assertAcked(prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("type", "field1", "type=text", "field2", "type=keyword")); - ensureGreen(); - - int numQueries = scaledRandomIntBetween(250, 500); - int numUniqueQueries = between(1, numQueries / 2); - String[] values = new String[numUniqueQueries]; - for (int i = 0; i < values.length; i++) { - values[i] = "value" + i; - } - int[] expectedCount = new int[numUniqueQueries]; - - logger.info("--> registering {} queries", numQueries); - for (int i = 0; i < numQueries; i++) { - String value = values[i % numUniqueQueries]; - expectedCount[i % numUniqueQueries]++; - QueryBuilder queryBuilder = matchQuery("field1", value); - client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute() - .actionGet(); - } - refresh(); - - for (int i = 0; i < numQueries; i++) { - String value = values[i % numUniqueQueries]; - PercolateRequestBuilder percolateRequestBuilder = preparePercolate(client()) - .setIndices(INDEX_NAME) - .setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); - - SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2").collectMode(aggCollectionMode)); - - if (randomBoolean()) { - percolateRequestBuilder.setPercolateQuery(matchAllQuery()); - } - - boolean countOnly = randomBoolean(); - if (countOnly) { - percolateRequestBuilder.setOnlyCount(countOnly); - } else { - // can only set size if we also keep track of matches (i.e. 
countOnly == false) - if (randomBoolean()) { - percolateRequestBuilder.setScore(true).setSize(expectedCount[i % numUniqueQueries]); - } else { - percolateRequestBuilder.setSortByScore(true).setSize(numQueries); - } - } - - percolateRequestBuilder.addAggregation(PipelineAggregatorBuilders.maxBucket("max_a", "a>_count")); - - PercolateResponse response = percolateRequestBuilder.execute().actionGet(); - assertMatchCount(response, expectedCount[i % numUniqueQueries]); - if (!countOnly) { - assertThat(response.getMatches(), arrayWithSize(expectedCount[i % numUniqueQueries])); - } - - Aggregations aggregations = response.getAggregations(); - assertThat(aggregations.asList().size(), equalTo(2)); - Terms terms = aggregations.get("a"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("a")); - List buckets = new ArrayList<>(terms.getBuckets()); - assertThat(buckets.size(), equalTo(1)); - assertThat(buckets.get(0).getKeyAsString(), equalTo("b")); - assertThat(buckets.get(0).getDocCount(), equalTo((long) expectedCount[i % values.length])); - - InternalBucketMetricValue maxA = aggregations.get("max_a"); - assertThat(maxA, notNullValue()); - assertThat(maxA.getName(), equalTo("max_a")); - assertThat(maxA.value(), equalTo((double) expectedCount[i % values.length])); - assertThat(maxA.keys(), equalTo(new String[] { "b" })); - } - } - - public void testSignificantAggs() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .execute().actionGet(); - ensureGreen(); - PercolateRequestBuilder percolateRequestBuilder = preparePercolate(client()).setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value").endObject())) - .addAggregation(AggregationBuilders.significantTerms("a").field("field2")); - PercolateResponse response = percolateRequestBuilder.get(); - assertNoFailures(response); - } - - public void testSingleShardAggregations() throws Exception { - assertAcked(prepareCreate(INDEX_NAME).setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1)) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("type", "field1", "type=text", "field2", "type=keyword")); - ensureGreen(); - - int numQueries = scaledRandomIntBetween(250, 500); - - logger.info("--> registering {} queries", numQueries); - for (int i = 0; i < numQueries; i++) { - String value = "value0"; - QueryBuilder queryBuilder = matchQuery("field1", value); - client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", i % 3 == 0 ? 
"b" : "a").endObject()) - .execute() - .actionGet(); - } - refresh(); - - for (int i = 0; i < numQueries; i++) { - String value = "value0"; - PercolateRequestBuilder percolateRequestBuilder = preparePercolate(client()) - .setIndices(INDEX_NAME) - .setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); - - SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - percolateRequestBuilder.addAggregation(AggregationBuilders.terms("terms").field("field2").collectMode(aggCollectionMode) - .order(Order.term(true)).shardSize(2).size(1)); - - if (randomBoolean()) { - percolateRequestBuilder.setPercolateQuery(matchAllQuery()); - } - - boolean countOnly = randomBoolean(); - if (countOnly) { - percolateRequestBuilder.setOnlyCount(countOnly); - } else { - // can only set size if we also keep track of matches (i.e. countOnly == false) - if (randomBoolean()) { - percolateRequestBuilder.setScore(true).setSize(numQueries); - } else { - percolateRequestBuilder.setSortByScore(true).setSize(numQueries); - } - } - - percolateRequestBuilder.addAggregation(PipelineAggregatorBuilders.maxBucket("max_terms", "terms>_count")); - - PercolateResponse response = percolateRequestBuilder.execute().actionGet(); - assertMatchCount(response, numQueries); - if (!countOnly) { - assertThat(response.getMatches(), arrayWithSize(numQueries)); - } - - Aggregations aggregations = response.getAggregations(); - assertThat(aggregations.asList().size(), equalTo(2)); - Terms terms = aggregations.get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = new ArrayList<>(terms.getBuckets()); - assertThat(buckets.size(), equalTo(1)); - assertThat(buckets.get(0).getKeyAsString(), equalTo("a")); - - InternalBucketMetricValue maxA = aggregations.get("max_terms"); - assertThat(maxA, notNullValue()); - assertThat(maxA.getName(), equalTo("max_terms")); - assertThat(maxA.keys(), equalTo(new String[] { "a" })); - } - } -} diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorClientYamlTestSuiteIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorClientYamlTestSuiteIT.java index 8efd3508398..28b3e6a2fdd 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorClientYamlTestSuiteIT.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorClientYamlTestSuiteIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.percolator; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java deleted file mode 100644 index 70c7f651922..00000000000 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java +++ /dev/null @@ -1,1839 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.percolator; - -import com.vividsolutions.jts.geom.Coordinate; - -import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.geo.builders.ShapeBuilders; -import org.elasticsearch.common.lucene.search.function.CombineFunction; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.Settings.Builder; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.engine.VersionConflictEngineException; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.query.Operator; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.index.query.functionscore.WeightBuilder; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.MockScriptPlugin; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.elasticsearch.test.ESIntegTestCase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.Set; -import java.util.TreeSet; -import java.util.function.Function; - -import static org.elasticsearch.percolator.PercolateSourceBuilder.docBuilder; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; -import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; -import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; -import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery; -import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; -import static 
org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; -import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.percolator.PercolatorTestUtil.assertMatchCount; -import static org.elasticsearch.percolator.PercolatorTestUtil.preparePercolate; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.arrayWithSize; -import static org.hamcrest.Matchers.emptyArray; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.nullValue; - -public class PercolatorIT extends ESIntegTestCase { - - private static final String INDEX_NAME = "queries"; - private static final String TYPE_NAME = "query"; - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Collections.singleton(PercolatorPlugin.class); - } - - - @Override - protected Collection<Class<? extends Plugin>> transportClientPlugins() { - return Collections.singleton(PercolatorPlugin.class); - } - - public void testSimple1() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME).addMapping(TYPE_NAME, "query", "type=percolator").get(); - ensureGreen(); - - logger.info("--> Add dummy doc"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1").setSource("field1", "value").execute().actionGet(); - - logger.info("--> register queries"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "3") - .setSource(jsonBuilder().startObject().field("query", boolQuery() - .must(matchQuery("field1", "b")) - .must(matchQuery("field1", "c")) - ).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "4") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - refresh(); - - logger.info("--> Percolate doc with field1=b"); - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType(TYPE_NAME) - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject())) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "4")); - - logger.info("--> Percolate doc with field1=c"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType(TYPE_NAME) - .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject())) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME),
arrayContainingInAnyOrder("2", "4")); - - logger.info("--> Percolate doc with field1=b c"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType(TYPE_NAME) - .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject())) - .execute().actionGet(); - assertMatchCount(response, 4L); - assertThat(response.getMatches(), arrayWithSize(4)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4")); - - logger.info("--> Percolate doc with field1=d"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType(TYPE_NAME) - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject())) - .execute().actionGet(); - assertMatchCount(response, 1L); - assertThat(response.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContaining("4")); - - logger.info("--> Percolate non existing doc"); - try { - preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType(TYPE_NAME) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("5")) - .execute().actionGet(); - fail("Exception should have been thrown"); - } catch (ResourceNotFoundException e) { - assertThat(e.getMessage(), equalTo("percolate document [queries/type/5] doesn't exist")); - } - } - - public void testSimple2() throws Exception { - assertAcked(prepareCreate(INDEX_NAME) - .addMapping("type1", "field1", "type=long", "field2", "type=text") - .addMapping(TYPE_NAME, "query", "type=percolator") - ); - ensureGreen(); - - // introduce the doc - XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startObject("doc") - .field("field1", 1) - .field("field2", "value") - .endObject().endObject(); - - PercolateResponse response = preparePercolate(client()).setSource(doc) - .setIndices(INDEX_NAME).setDocumentType(TYPE_NAME) - .execute().actionGet(); - assertMatchCount(response, 0L); - assertThat(response.getMatches(), emptyArray()); - - // add first query... - client().prepareIndex(INDEX_NAME, TYPE_NAME, "test1") - .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject()) - .execute().actionGet(); - refresh(); - - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType(TYPE_NAME) - .setSource(doc).execute().actionGet(); - assertMatchCount(response, 1L); - assertThat(response.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContaining("test1")); - - // add second query... 
- client().prepareIndex(INDEX_NAME, TYPE_NAME, "test2") - .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field1", 1)).endObject()) - .execute().actionGet(); - refresh(); - - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type1") - .setSource(doc) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("test1", "test2")); - - - client().prepareDelete(INDEX_NAME, TYPE_NAME, "test2").execute().actionGet(); - refresh(); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type1") - .setSource(doc).execute().actionGet(); - assertMatchCount(response, 1L); - assertThat(response.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContaining("test1")); - } - - public void testPercolateQueriesWithRouting() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME) - .setSettings(Settings.builder().put("index.number_of_shards", 2)) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("type", "field1", "type=text") - .execute().actionGet(); - ensureGreen(); - - logger.info("--> register queries"); - for (int i = 1; i <= 100; i++) { - client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .setRouting(Integer.toString(i % 2)) - .execute().actionGet(); - } - refresh(); - - logger.info("--> Percolate doc with no routing"); - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())) - .setSize(100) - .execute().actionGet(); - assertMatchCount(response, 100L); - assertThat(response.getMatches(), arrayWithSize(100)); - - logger.info("--> Percolate doc with routing=0"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())) - .setSize(100) - .setRouting("0") - .execute().actionGet(); - assertMatchCount(response, 50L); - assertThat(response.getMatches(), arrayWithSize(50)); - - logger.info("--> Percolate doc with routing=1"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())) - .setSize(100) - .setRouting("1") - .execute().actionGet(); - assertMatchCount(response, 50L); - assertThat(response.getMatches(), arrayWithSize(50)); - } - - public void storePercolateQueriesOnRecreatedIndex() throws Exception { - prepareCreate(INDEX_NAME).addMapping(TYPE_NAME, "query", "type=percolator").get(); - ensureGreen(); - - client().prepareIndex(INDEX_NAME, "test", "1").setSource("field1", "value1").execute().actionGet(); - logger.info("--> register a query"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "kuku1") - .setSource(jsonBuilder().startObject() - .field("color", "blue") - .field("query", termQuery("field1", "value1")) - .endObject()) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - cluster().wipeIndices("test"); - createIndex("test"); - ensureGreen(); -
- client().prepareIndex(INDEX_NAME, "test", "1").setSource("field1", "value1").execute().actionGet(); - logger.info("--> register a query"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "kuku2") - .setSource(jsonBuilder().startObject() - .field("color", "blue") - .field("query", termQuery("field1", "value1")) - .endObject()) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - } - - // see #2814 - public void testPercolateCustomAnalyzer() throws Exception { - Builder builder = Settings.builder(); - builder.put("index.analysis.analyzer.lwhitespacecomma.tokenizer", "whitespacecomma"); - builder.putArray("index.analysis.analyzer.lwhitespacecomma.filter", "lowercase"); - builder.put("index.analysis.tokenizer.whitespacecomma.type", "pattern"); - builder.put("index.analysis.tokenizer.whitespacecomma.pattern", "(,|\\s+)"); - - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("doc") - .startObject("properties") - .startObject("filingcategory").field("type", "text").field("analyzer", "lwhitespacecomma").endObject() - .endObject() - .endObject().endObject(); - - assertAcked(prepareCreate(INDEX_NAME).setSettings(builder) - .addMapping("doc", mapping) - .addMapping(TYPE_NAME, "query", "type=percolator") - ); - ensureGreen(); - - logger.info("--> register a query"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject() - .field("source", "productizer") - .field("query", QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("filingcategory:s"))) - .endObject()) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - refresh(); - - PercolateResponse percolate = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("doc") - .setSource(jsonBuilder().startObject() - .startObject("doc").field("filingcategory", "s").endObject() - .field("query", termQuery("source", "productizer")) - .endObject()) - .execute().actionGet(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), arrayWithSize(1)); - - } - - public void testCreateIndexAndThenRegisterPercolator() throws Exception { - prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("type1", "field1", "type=text") - .get(); - ensureGreen(); - - logger.info("--> register a query"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "kuku") - .setSource(jsonBuilder().startObject() - .field("color", "blue") - .field("query", termQuery("field1", "value1")) - .endObject()) - .execute().actionGet(); - refresh(); - SearchResponse countResponse = client().prepareSearch().setSize(0) - .setQuery(matchAllQuery()).setTypes(TYPE_NAME) - .execute().actionGet(); - assertThat(countResponse.getHits().totalHits(), equalTo(1L)); - - - for (int i = 0; i < 10; i++) { - PercolateResponse percolate = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), arrayWithSize(1)); - } - - for (int i = 0; i < 10; i++) { - PercolateResponse percolate = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type1") - .setPreference("_local") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), arrayWithSize(1)); - } - - - logger.info("--> delete 
the index"); - client().admin().indices().prepareDelete(INDEX_NAME).execute().actionGet(); - logger.info("--> make sure percolated queries for it have been deleted as well"); - countResponse = client().prepareSearch().setSize(0) - .setQuery(matchAllQuery()).setTypes(TYPE_NAME) - .execute().actionGet(); - assertHitCount(countResponse, 0L); - } - - public void testMultiplePercolators() throws Exception { - assertAcked(prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("type1", "field1", "type=text") - ); - ensureGreen(); - - logger.info("--> register a query 1"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "kuku") - .setSource(jsonBuilder().startObject() - .field("color", "blue") - .field("query", termQuery("field1", "value1")) - .endObject()) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - logger.info("--> register a query 2"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "bubu") - .setSource(jsonBuilder().startObject() - .field("color", "green") - .field("query", termQuery("field1", "value2")) - .endObject()) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - PercolateResponse percolate = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(percolate.getMatches(), INDEX_NAME), arrayContaining("kuku")); - - percolate = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value2").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(percolate.getMatches(), INDEX_NAME), arrayContaining("bubu")); - - } - - public void testDynamicAddingRemovingQueries() throws Exception { - assertAcked( - prepareCreate(INDEX_NAME) - .addMapping("type1", "field1", "type=text") - .addMapping(TYPE_NAME, "query", "type=percolator") - ); - ensureGreen(); - - logger.info("--> register a query 1"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "kuku") - .setSource(jsonBuilder().startObject() - .field("color", "blue") - .field("query", termQuery("field1", "value1")) - .endObject()) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - PercolateResponse percolate = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(percolate.getMatches(), INDEX_NAME), arrayContaining("kuku")); - - logger.info("--> register a query 2"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "bubu") - .setSource(jsonBuilder().startObject() - .field("color", "green") - .field("query", termQuery("field1", "value2")) - .endObject()) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - percolate = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value2").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), 
arrayWithSize(1)); - assertThat(convertFromTextArray(percolate.getMatches(), INDEX_NAME), arrayContaining("bubu")); - - logger.info("--> register a query 3"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "susu") - .setSource(jsonBuilder().startObject() - .field("color", "red") - .field("query", termQuery("field1", "value2")) - .endObject()) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - PercolateSourceBuilder sourceBuilder = new PercolateSourceBuilder() - .setDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value2").endObject())) - .setQueryBuilder(termQuery("color", "red")); - percolate = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type1") - .setSource(sourceBuilder) - .execute().actionGet(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(percolate.getMatches(), INDEX_NAME), arrayContaining("susu")); - - logger.info("--> deleting query 1"); - client().prepareDelete(INDEX_NAME, TYPE_NAME, "kuku").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); - - percolate = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc").startObject("type1") - .field("field1", "value1") - .endObject().endObject().endObject()) - .execute().actionGet(); - assertMatchCount(percolate, 0L); - assertThat(percolate.getMatches(), emptyArray()); - } - - public void testPercolatingExistingDocs() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .get(); - ensureGreen(); - - logger.info("--> Adding docs"); - client().prepareIndex(INDEX_NAME, "type", "1").setSource("field1", "b").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "2").setSource("field1", "c").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "3").setSource("field1", "b c").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "4").setSource("field1", "d").execute().actionGet(); - - logger.info("--> register queries"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "3") - .setSource(jsonBuilder().startObject().field("query", boolQuery() - .must(matchQuery("field1", "b")) - .must(matchQuery("field1", "c")) - ).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "4") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - refresh(); - - logger.info("--> Percolate existing doc with id 1"); - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("1")) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "4")); - - logger.info("--> Percolate existing doc with id 2"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") -
.setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("2")) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("2", "4")); - - logger.info("--> Percolate existing doc with id 3"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("3")) - .execute().actionGet(); - assertMatchCount(response, 4L); - assertThat(response.getMatches(), arrayWithSize(4)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4")); - - logger.info("--> Percolate existing doc with id 4"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("4")) - .execute().actionGet(); - assertMatchCount(response, 1L); - assertThat(response.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContaining("4")); - } - - public void testPercolatingExistingDocs_routing() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .execute().actionGet(); - ensureGreen(); - - logger.info("--> Adding docs"); - client().prepareIndex(INDEX_NAME, "type", "1").setSource("field1", "b").setRouting("4").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "2").setSource("field1", "c").setRouting("3").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "3").setSource("field1", "b c").setRouting("2").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "4").setSource("field1", "d").setRouting("1").execute().actionGet(); - - logger.info("--> register queries"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "3") - .setSource(jsonBuilder().startObject().field("query", boolQuery() - .must(matchQuery("field1", "b")) - .must(matchQuery("field1", "c")) - ).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "4") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - refresh(); - - logger.info("--> Percolate existing doc with id 1"); - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("1").routing("4")) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "4")); - - logger.info("--> Percolate existing doc with id 2"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("2").routing("3")) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), arrayWithSize(2)); -
assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("2", "4")); - - logger.info("--> Percolate existing doc with id 3"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("3").routing("2")) - .execute().actionGet(); - assertMatchCount(response, 4L); - assertThat(response.getMatches(), arrayWithSize(4)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4")); - - logger.info("--> Percolate existing doc with id 4"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("4").routing("1")) - .execute().actionGet(); - assertMatchCount(response, 1L); - assertThat(response.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContaining("4")); - } - - public void testPercolatingExistingDocs_versionCheck() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .get(); - ensureGreen(); - - logger.info("--> Adding docs"); - client().prepareIndex(INDEX_NAME, "type", "1").setSource("field1", "b").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "2").setSource("field1", "c").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "3").setSource("field1", "b c").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "4").setSource("field1", "d").execute().actionGet(); - - logger.info("--> registering queries"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "3") - .setSource(jsonBuilder().startObject().field("query", boolQuery() - .must(matchQuery("field1", "b")) - .must(matchQuery("field1", "c")) - ).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "4") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - refresh(); - - logger.info("--> Percolate existing doc with id 2 and version 1"); - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("2").version(1L)) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("2", "4")); - - logger.info("--> Percolate existing doc with id 2 and version 2"); - try { - preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("2").version(2L)) - .execute().actionGet(); - fail("Error should have been thrown"); - } catch (VersionConflictEngineException e) { - } - - logger.info("--> Index doc with id 2 for the second time"); - client().prepareIndex(INDEX_NAME, "type", "2").setSource("field1", "c").execute().actionGet(); - - logger.info("--> Percolate existing doc with id 2 and version 2"); - response
= preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("2").version(2L)) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("2", "4")); - } - - public void testPercolateMultipleIndicesAndAliases() throws Exception { - prepareCreate(INDEX_NAME).addMapping(TYPE_NAME, "query", "type=percolator").get(); - prepareCreate(INDEX_NAME + "2").addMapping(TYPE_NAME, "query", "type=percolator").get(); - ensureGreen(); - - logger.info("--> registering queries"); - for (int i = 1; i <= 10; i++) { - String index = i % 2 == 0 ? INDEX_NAME : INDEX_NAME + "2"; - client().prepareIndex(index, TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - } - refresh(); - - logger.info("--> Percolate doc to index test1"); - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(response, 5L); - assertThat(response.getMatches(), arrayWithSize(5)); - - logger.info("--> Percolate doc to index test2"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME + "2").setDocumentType("type") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(response, 5L); - assertThat(response.getMatches(), arrayWithSize(5)); - - logger.info("--> Percolate doc to index test1 and test2"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME, INDEX_NAME + "2").setDocumentType("type") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(response, 10L); - assertThat(response.getMatches(), arrayWithSize(10)); - - logger.info("--> Percolate doc to index test2 and test3, with ignore missing"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME , INDEX_NAME + "3").setDocumentType("type") - .setIndicesOptions(IndicesOptions.lenientExpandOpen()) - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(response, 5L); - assertThat(response.getMatches(), arrayWithSize(5)); - - logger.info("--> Adding aliases"); - IndicesAliasesResponse aliasesResponse = client().admin().indices().prepareAliases() - .addAlias(INDEX_NAME, "my-alias1") - .addAlias(INDEX_NAME + "2", "my-alias1") - .addAlias(INDEX_NAME + "2", "my-alias2") - .setTimeout(TimeValue.timeValueHours(10)) - .execute().actionGet(); - assertTrue(aliasesResponse.isAcknowledged()); - - logger.info("--> Percolate doc to my-alias1"); - response = preparePercolate(client()) - .setIndices("my-alias1").setDocumentType("type") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(response, 10L); - assertThat(response.getMatches(), arrayWithSize(10)); - for (PercolateResponse.Match match : response) { - assertThat(match.getIndex().string(), anyOf(equalTo(INDEX_NAME), equalTo(INDEX_NAME + "2"))); - } - - logger.info("--> Percolate doc to 
my-alias2"); - response = preparePercolate(client()) - .setIndices("my-alias2").setDocumentType("type") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(response, 5L); - assertThat(response.getMatches(), arrayWithSize(5)); - for (PercolateResponse.Match match : response) { - assertThat(match.getIndex().string(), equalTo(INDEX_NAME + "2")); - } - } - - public void testPercolateWithAliasFilter() throws Exception { - assertAcked(prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("my-type", "a", "type=keyword") - .addAlias(new Alias("a").filter(QueryBuilders.termQuery("a", "a"))) - .addAlias(new Alias("b").filter(QueryBuilders.termQuery("a", "b"))) - .addAlias(new Alias("c").filter(QueryBuilders.termQuery("a", "c"))) - ); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("a", "a").endObject()) - .get(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("a", "b").endObject()) - .get(); - refresh(); - - // Specifying only the document to percolate and no filter, sorting or aggs, the queries are retrieved from - // memory directly. Otherwise we need to retrieve those queries from lucene to be able to execute filters, - // aggregations and sorting on top of them. So this test a different code execution path. - PercolateResponse response = preparePercolate(client()) - .setIndices("a") - .setDocumentType("my-type") - .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) - .get(); - assertNoFailures(response); - assertThat(response.getCount(), equalTo(1L)); - assertThat(response.getMatches()[0].getId().string(), equalTo("1")); - - response = preparePercolate(client()) - .setIndices("b") - .setDocumentType("my-type") - .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) - .get(); - assertNoFailures(response); - assertThat(response.getCount(), equalTo(1L)); - assertThat(response.getMatches()[0].getId().string(), equalTo("2")); - - - response = preparePercolate(client()) - .setIndices("c") - .setDocumentType("my-type") - .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) - .get(); - assertNoFailures(response); - assertThat(response.getCount(), equalTo(0L)); - - // Testing that the alias filter and the filter specified while percolating are both taken into account. 
- response = preparePercolate(client()) - .setIndices("a") - .setDocumentType("my-type") - .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) - .setPercolateQuery(QueryBuilders.matchAllQuery()) - .get(); - assertNoFailures(response); - assertThat(response.getCount(), equalTo(1L)); - assertThat(response.getMatches()[0].getId().string(), equalTo("1")); - - response = preparePercolate(client()) - .setIndices("b") - .setDocumentType("my-type") - .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) - .setPercolateQuery(QueryBuilders.matchAllQuery()) - .get(); - assertNoFailures(response); - assertThat(response.getCount(), equalTo(1L)); - assertThat(response.getMatches()[0].getId().string(), equalTo("2")); - - - response = preparePercolate(client()) - .setIndices("c") - .setDocumentType("my-type") - .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) - .setPercolateQuery(QueryBuilders.matchAllQuery()) - .get(); - assertNoFailures(response); - assertThat(response.getCount(), equalTo(0L)); - } - - public void testCountPercolation() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .get(); - ensureGreen(); - - logger.info("--> Add dummy doc"); - client().prepareIndex(INDEX_NAME, "type", "1").setSource("field1", "value").execute().actionGet(); - - logger.info("--> register queries"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "3") - .setSource(jsonBuilder().startObject().field("query", boolQuery() - .must(matchQuery("field1", "b")) - .must(matchQuery("field1", "c")) - ).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "4") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - refresh(); - - logger.info("--> Count percolate doc with field1=b"); - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type").setOnlyCount(true) - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject())) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), nullValue()); - - logger.info("--> Count percolate doc with field1=c"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type").setOnlyCount(true) - .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject())) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), nullValue()); - - logger.info("--> Count percolate doc with field1=b c"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type").setOnlyCount(true) - .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject())) - .execute().actionGet(); - assertMatchCount(response, 4L); - assertThat(response.getMatches(), nullValue()); - - logger.info("--> Count percolate doc with field1=d"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type").setOnlyCount(true) -
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject())) - .execute().actionGet(); - assertMatchCount(response, 1L); - assertThat(response.getMatches(), nullValue()); - - logger.info("--> Count percolate non existing doc"); - try { - preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type").setOnlyCount(true) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("5")) - .execute().actionGet(); - fail("Exception should have been thrown"); - } catch (ResourceNotFoundException e) { - assertThat(e.getMessage(), equalTo("percolate document [" + INDEX_NAME + "/type/5] doesn't exist")); - } - } - - public void testCountPercolatingExistingDocs() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .get(); - ensureGreen(); - - logger.info("--> Adding docs"); - client().prepareIndex(INDEX_NAME, "type", "1").setSource("field1", "b").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "2").setSource("field1", "c").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "3").setSource("field1", "b c").execute().actionGet(); - client().prepareIndex(INDEX_NAME, "type", "4").setSource("field1", "d").execute().actionGet(); - - logger.info("--> register queries"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "3") - .setSource(jsonBuilder().startObject().field("query", boolQuery() - .must(matchQuery("field1", "b")) - .must(matchQuery("field1", "c")) - ).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "4") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - refresh(); - - logger.info("--> Count percolate existing doc with id 1"); - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type").setOnlyCount(true) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("1")) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), nullValue()); - - logger.info("--> Count percolate existing doc with id 2"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type").setOnlyCount(true) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("2")) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches(), nullValue()); - - logger.info("--> Count percolate existing doc with id 3"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type").setOnlyCount(true) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("3")) - .execute().actionGet(); - assertMatchCount(response, 4L); - assertThat(response.getMatches(), nullValue()); - - logger.info("--> Count percolate existing doc with id 4"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type").setOnlyCount(true) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("4")) - .execute().actionGet(); - assertMatchCount(response, 1L); - assertThat(response.getMatches(), nullValue()); - } - -
public void testPercolateSizingWithQueryAndFilter() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .get(); - ensureGreen(); - - int numLevels = randomIntBetween(1, 25); - long numQueriesPerLevel = randomIntBetween(10, 250); - long totalQueries = numLevels * numQueriesPerLevel; - logger.info("--> register {} queries", totalQueries); - for (int level = 1; level <= numLevels; level++) { - for (int query = 1; query <= numQueriesPerLevel; query++) { - client().prepareIndex(INDEX_NAME, TYPE_NAME, level + "-" + query) - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", level).endObject()) - .execute().actionGet(); - } - } - refresh(); - - boolean onlyCount = randomBoolean(); - PercolateRequestBuilder builder = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("my-type") - .setOnlyCount(onlyCount) - .setPercolateDoc(docBuilder().setDoc("field", "value")); - if (!onlyCount) { - builder.setSize((int) totalQueries); - } - PercolateResponse response = builder.execute().actionGet(); - assertMatchCount(response, totalQueries); - if (!onlyCount) { - assertThat(response.getMatches().length, equalTo((int) totalQueries)); - } - - int size = randomIntBetween(0, (int) totalQueries - 1); - builder = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("my-type") - .setOnlyCount(onlyCount) - .setPercolateDoc(docBuilder().setDoc("field", "value")); - if (!onlyCount) { - builder.setSize(size); - } - response = builder.execute().actionGet(); - assertMatchCount(response, totalQueries); - if (!onlyCount) { - assertThat(response.getMatches().length, equalTo(size)); - } - - // The query / filter capabilities are NOT realtime - refresh(); - - int runs = randomIntBetween(3, 16); - for (int i = 0; i < runs; i++) { - onlyCount = randomBoolean(); - builder = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("my-type") - .setOnlyCount(onlyCount) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(termQuery("level", 1 + randomInt(numLevels - 1))); - if (!onlyCount) { - builder.setSize((int) numQueriesPerLevel); - } - response = builder.execute().actionGet(); - assertMatchCount(response, numQueriesPerLevel); - if (!onlyCount) { - assertThat(response.getMatches().length, equalTo((int) numQueriesPerLevel)); - } - } - - for (int i = 0; i < runs; i++) { - onlyCount = randomBoolean(); - builder = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("my-type") - .setOnlyCount(onlyCount) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(termQuery("level", 1 + randomInt(numLevels - 1))); - if (!onlyCount) { - builder.setSize((int) numQueriesPerLevel); - } - response = builder.execute().actionGet(); - assertMatchCount(response, numQueriesPerLevel); - if (!onlyCount) { - assertThat(response.getMatches().length, equalTo((int) numQueriesPerLevel)); - } - } - - for (int i = 0; i < runs; i++) { - onlyCount = randomBoolean(); - size = randomIntBetween(0, (int) numQueriesPerLevel - 1); - builder = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("my-type") - .setOnlyCount(onlyCount) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(termQuery("level", 1 + randomInt(numLevels - 1))); - if (!onlyCount) { - builder.setSize(size); - } - response = builder.execute().actionGet(); - assertMatchCount(response, numQueriesPerLevel); - if (!onlyCount) { -
assertThat(response.getMatches().length, equalTo(size)); - } - } - } - - public void testPercolateScoreAndSorting() throws Exception { - prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .get(); - ensureGreen(); - - // Add a dummy doc that should never interfere with percolate operations. - client().prepareIndex(INDEX_NAME, "my-type", "1").setSource("field", "value").execute().actionGet(); - - Map<Integer, NavigableSet<Integer>> controlMap = new HashMap<>(); - long numQueries = randomIntBetween(100, 250); - logger.info("--> register {} queries", numQueries); - for (int i = 0; i < numQueries; i++) { - int value = randomInt(10); - client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", i).field("field1", value).endObject()) - .execute().actionGet(); - if (!controlMap.containsKey(value)) { - controlMap.put(value, new TreeSet<>()); - } - controlMap.get(value).add(i); - } - List<Integer> usedValues = new ArrayList<>(controlMap.keySet()); - refresh(); - - // Only retrieve the score - int runs = randomInt(27); - for (int i = 0; i < runs; i++) { - int size = randomIntBetween(1, 50); - PercolateResponse response = preparePercolate(client()).setIndices(INDEX_NAME).setDocumentType("my-type") - .setScore(true) - .setSize(size) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("level"))) - .execute().actionGet(); - assertMatchCount(response, numQueries); - assertThat(response.getMatches().length, equalTo(size)); - for (int j = 0; j < response.getMatches().length; j++) { - String id = response.getMatches()[j].getId().string(); - assertThat(Integer.valueOf(id), equalTo((int) response.getMatches()[j].getScore())); - } - } - - // Sort the queries by the score - for (int i = 0; i < runs; i++) { - int size = randomIntBetween(1, 10); - PercolateResponse response = preparePercolate(client()).setIndices(INDEX_NAME).setDocumentType("my-type") - .setSortByScore(true) - .setSize(size) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("level"))) - .execute().actionGet(); - assertMatchCount(response, numQueries); - assertThat(response.getMatches().length, equalTo(size)); - - int expectedId = (int) (numQueries - 1); - for (PercolateResponse.Match match : response) { - assertThat(match.getId().string(), equalTo(Integer.toString(expectedId))); - assertThat(match.getScore(), equalTo((float) expectedId)); - assertThat(match.getIndex().string(), equalTo(INDEX_NAME)); - expectedId--; - } - } - - - for (int i = 0; i < runs; i++) { - int value = usedValues.get(randomInt(usedValues.size() - 1)); - NavigableSet<Integer> levels = controlMap.get(value); - int size = randomIntBetween(1, levels.size()); - PercolateResponse response = preparePercolate(client()).setIndices(INDEX_NAME).setDocumentType("my-type") - .setSortByScore(true) - .setSize(size) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery( - QueryBuilders.functionScoreQuery(matchQuery("field1", value), fieldValueFactorFunction("level")) - .boostMode( - CombineFunction.REPLACE)) - .execute().actionGet(); - - assertMatchCount(response, levels.size()); - assertThat(response.getMatches().length, equalTo(Math.min(levels.size(), size))); - Iterator<Integer> levelIterator = levels.descendingIterator(); - for (PercolateResponse.Match match : response) { - int controlLevel =
levelIterator.next(); - assertThat(match.getId().string(), equalTo(Integer.toString(controlLevel))); - assertThat(match.getScore(), equalTo((float) controlLevel)); - assertThat(match.getIndex().string(), equalTo(INDEX_NAME)); - } - } - } - - public void testPercolateSortingWithNoSize() throws Exception { - prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .get(); - ensureGreen(); - - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 2).endObject()) - .execute().actionGet(); - refresh(); - - PercolateResponse response = preparePercolate(client()).setIndices(INDEX_NAME).setDocumentType("my-type") - .setSortByScore(true) - .setSize(2) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("level"))) - .execute().actionGet(); - assertMatchCount(response, 2L); - assertThat(response.getMatches()[0].getId().string(), equalTo("2")); - assertThat(response.getMatches()[0].getScore(), equalTo(2f)); - assertThat(response.getMatches()[1].getId().string(), equalTo("1")); - assertThat(response.getMatches()[1].getScore(), equalTo(1f)); - } - - public void testPercolateOnEmptyIndex() throws Exception { - prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .get(); - ensureGreen(); - - PercolateResponse response = preparePercolate(client()).setIndices(INDEX_NAME).setDocumentType("my-type") - .setSortByScore(true) - .setSize(2) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("level").missing(0.0))) - .execute().actionGet(); - assertMatchCount(response, 0L); - } - - public void testPercolatorWithHighlighting() throws Exception { - StringBuilder fieldMapping = new StringBuilder("type=text") - .append(",store=").append(randomBoolean()); - if (randomBoolean()) { - fieldMapping.append(",term_vector=with_positions_offsets"); - } else if (randomBoolean()) { - fieldMapping.append(",index_options=offsets"); - } - assertAcked(prepareCreate(INDEX_NAME) - .addMapping("type", "field1", fieldMapping.toString()) - .addMapping(TYPE_NAME, "query", "type=percolator") - ); - - logger.info("--> register queries"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "brown fox")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "lazy dog")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "3") - .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "jumps")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "4") - .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "dog")).endObject()) - .execute().actionGet(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "5") - .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "fox")).endObject()) - .execute().actionGet(); - refresh(); - - logger.info("--> Percolate doc with field1=The quick brown fox jumps over the lazy dog"); -
PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setSize(5) - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject())) - .setHighlightBuilder(new HighlightBuilder().field("field1")) - .execute().actionGet(); - assertMatchCount(response, 5L); - assertThat(response.getMatches(), arrayWithSize(5)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4", "5")); - - PercolateResponse.Match[] matches = response.getMatches(); - Arrays.sort(matches, (a, b) -> a.getId().compareTo(b.getId())); - - assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog")); - assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>")); - assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog")); - assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>")); - assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog")); - - logger.info("--> Query percolate doc with field1=The quick brown fox jumps over the lazy dog"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setSize(5) - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject())) - .setHighlightBuilder(new HighlightBuilder().field("field1")) - .setPercolateQuery(matchAllQuery()) - .execute().actionGet(); - assertMatchCount(response, 5L); - assertThat(response.getMatches(), arrayWithSize(5)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4", "5")); - - matches = response.getMatches(); - Arrays.sort(matches, new Comparator<PercolateResponse.Match>() { - @Override - public int compare(PercolateResponse.Match a, PercolateResponse.Match b) { - return a.getId().compareTo(b.getId()); - } - }); - - assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog")); - assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>")); - assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog")); - assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>")); - assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog")); - - logger.info("--> Query percolate with score for doc with field1=The quick brown fox jumps over the lazy dog"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setSize(5) - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject())) - .setHighlightBuilder(new HighlightBuilder().field("field1")) - .setPercolateQuery(functionScoreQuery(new WeightBuilder().setWeight(5.5f))) - .setScore(true) -
.execute().actionGet(); - assertNoFailures(response); - assertThat(response.getMatches(), arrayWithSize(5)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4", "5")); - - matches = response.getMatches(); - Arrays.sort(matches, new Comparator<PercolateResponse.Match>() { - @Override - public int compare(PercolateResponse.Match a, PercolateResponse.Match b) { - return a.getId().compareTo(b.getId()); - } - }); - - assertThat(matches[0].getScore(), equalTo(5.5f)); - assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog")); - assertThat(matches[1].getScore(), equalTo(5.5f)); - assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>")); - assertThat(matches[2].getScore(), equalTo(5.5f)); - assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog")); - assertThat(matches[3].getScore(), equalTo(5.5f)); - assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>")); - assertThat(matches[4].getScore(), equalTo(5.5f)); - assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog")); - - logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setSize(5) - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject())) - .setHighlightBuilder(new HighlightBuilder().field("field1")) - .setPercolateQuery(functionScoreQuery(new WeightBuilder().setWeight(5.5f))) - .setSortByScore(true) - .execute().actionGet(); - assertMatchCount(response, 5L); - assertThat(response.getMatches(), arrayWithSize(5)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4", "5")); - - matches = response.getMatches(); - Arrays.sort(matches, new Comparator<PercolateResponse.Match>() { - @Override - public int compare(PercolateResponse.Match a, PercolateResponse.Match b) { - return a.getId().compareTo(b.getId()); - } - }); - - assertThat(matches[0].getScore(), equalTo(5.5f)); - assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog")); - assertThat(matches[1].getScore(), equalTo(5.5f)); - assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>")); - assertThat(matches[2].getScore(), equalTo(5.5f)); - assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog")); - assertThat(matches[3].getScore(), equalTo(5.5f)); - assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>")); - assertThat(matches[4].getScore(), equalTo(5.5f)); - assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog")); - - logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") -
.setSize(5) - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject())) - .setHighlightBuilder(new HighlightBuilder().field("field1").highlightQuery(QueryBuilders.matchQuery("field1", "jumps"))) - .setPercolateQuery(functionScoreQuery(new WeightBuilder().setWeight(5.5f))) - .setSortByScore(true) - .execute().actionGet(); - assertMatchCount(response, 5L); - assertThat(response.getMatches(), arrayWithSize(5)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4", "5")); - - matches = response.getMatches(); - Arrays.sort(matches, new Comparator<PercolateResponse.Match>() { - @Override - public int compare(PercolateResponse.Match a, PercolateResponse.Match b) { - return a.getId().compareTo(b.getId()); - } - }); - - assertThat(matches[0].getScore(), equalTo(5.5f)); - assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog")); - assertThat(matches[1].getScore(), equalTo(5.5f)); - assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog")); - assertThat(matches[2].getScore(), equalTo(5.5f)); - assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog")); - assertThat(matches[3].getScore(), equalTo(5.5f)); - assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog")); - assertThat(matches[4].getScore(), equalTo(5.5f)); - assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog")); - - // Highlighting an existing doc - client().prepareIndex(INDEX_NAME, "type", "1") - .setSource(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()) - .get(); - refresh(); - - logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog"); - response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setSize(5) - .setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("1")) - .setHighlightBuilder(new HighlightBuilder().field("field1")) - .setPercolateQuery(functionScoreQuery(new WeightBuilder().setWeight(5.5f))) - .setSortByScore(true) - .execute().actionGet(); - assertMatchCount(response, 5L); - assertThat(response.getMatches(), arrayWithSize(5)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4", "5")); - - matches = response.getMatches(); - Arrays.sort(matches, new Comparator<PercolateResponse.Match>() { - @Override - public int compare(PercolateResponse.Match a, PercolateResponse.Match b) { - return a.getId().compareTo(b.getId()); - } - }); - - assertThat(matches[0].getScore(), equalTo(5.5f)); - assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog")); - assertThat(matches[1].getScore(), equalTo(5.5f)); - assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>")); - assertThat(matches[2].getScore(), equalTo(5.5f)); - assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog")); - assertThat(matches[3].getScore(),
equalTo(5.5f)); - assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>")); - assertThat(matches[4].getScore(), equalTo(5.5f)); - assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog")); - } - - public void testPercolateNonMatchingConstantScoreQuery() throws Exception { - assertAcked(prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("doc", "message", "type=text")); - ensureGreen(); - - logger.info("--> register a query"); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject() - .field("query", QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() - .must(QueryBuilders.queryStringQuery("root")) - .must(QueryBuilders.termQuery("message", "tree")))) - .endObject()) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - refresh(); - - PercolateResponse percolate = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("doc") - .setSource(jsonBuilder().startObject() - .startObject("doc").field("message", "A new bonsai tree ").endObject() - .endObject()) - .execute().actionGet(); - assertNoFailures(percolate); - assertMatchCount(percolate, 0L); - } - - public void testNestedPercolation() throws IOException { - initNestedIndexAndPercolation(); - PercolateResponse response = preparePercolate(client()).setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getNotMatchingNestedDoc())).setIndices(INDEX_NAME).setDocumentType("company").get(); - assertEquals(response.getMatches().length, 0); - response = preparePercolate(client()).setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getMatchingNestedDoc())).setIndices(INDEX_NAME).setDocumentType("company").get(); - assertEquals(response.getMatches().length, 1); - assertEquals(response.getMatches()[0].getId().string(), "Q"); - } - - public void testNonNestedDocumentDoesNotTriggerAssertion() throws IOException { - initNestedIndexAndPercolation(); - XContentBuilder doc = jsonBuilder(); - doc.startObject(); - doc.field("some_unnested_field", "value"); - doc.endObject(); - PercolateResponse response = preparePercolate(client()).setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(doc)).setIndices(INDEX_NAME).setDocumentType("company").get(); - assertNoFailures(response); - } - - public void testNestedPercolationOnExistingDoc() throws IOException { - initNestedIndexAndPercolation(); - client().prepareIndex(INDEX_NAME, "company", "notmatching").setSource(getNotMatchingNestedDoc()).get(); - client().prepareIndex(INDEX_NAME, "company", "matching").setSource(getMatchingNestedDoc()).get(); - refresh(); - PercolateResponse response = preparePercolate(client()).setGetRequest(Requests.getRequest(INDEX_NAME).type("company").id("notmatching")).setDocumentType("company").setIndices(INDEX_NAME).get(); - assertEquals(response.getMatches().length, 0); - response = preparePercolate(client()).setGetRequest(Requests.getRequest(INDEX_NAME).type("company").id("matching")).setDocumentType("company").setIndices(INDEX_NAME).get(); - assertEquals(response.getMatches().length, 1); - assertEquals(response.getMatches()[0].getId().string(), "Q"); - } - - public void testDontReportDeletedPercolatorDocs() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .execute().actionGet(); - ensureGreen(); - -
client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .get(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .get(); - refresh(); - - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "value").endObject())) - .setPercolateQuery(QueryBuilders.matchAllQuery()) - .get(); - assertMatchCount(response, 1L); - assertThat(response.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1")); - } - - public void testAddQueryWithNoMapping() throws Exception { - client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .get(); - ensureGreen(); - - try { - client().prepareIndex(INDEX_NAME, TYPE_NAME) - .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "value")).endObject()) - .get(); - fail(); - } catch (MapperParsingException e) { - assertThat(e.getRootCause(), instanceOf(QueryShardException.class)); - } - - try { - client().prepareIndex(INDEX_NAME, TYPE_NAME) - .setSource(jsonBuilder().startObject().field("query", rangeQuery("field1").from(0).to(1)).endObject()) - .get(); - fail(); - } catch (MapperParsingException e) { - assertThat(e.getRootCause(), instanceOf(QueryShardException.class)); - } - } - - void initNestedIndexAndPercolation() throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject().startObject("properties").startObject("companyname").field("type", "text").endObject() - .startObject("employee").field("type", "nested").startObject("properties") - .startObject("name").field("type", "text").endObject().endObject().endObject().endObject() - .endObject(); - - assertAcked(client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping("company", mapping) - .addMapping(TYPE_NAME, "query", "type=percolator") - ); - ensureGreen(INDEX_NAME); - - client().prepareIndex(INDEX_NAME, TYPE_NAME, "Q").setSource(jsonBuilder().startObject() - .field("query", QueryBuilders.nestedQuery("employee", QueryBuilders.matchQuery("employee.name", "virginia potts").operator(Operator.AND), ScoreMode.Avg)).endObject()).get(); - - refresh(); - - } - - XContentBuilder getMatchingNestedDoc() throws IOException { - XContentBuilder doc = XContentFactory.jsonBuilder(); - doc.startObject().field("companyname", "stark").startArray("employee") - .startObject().field("name", "virginia potts").endObject() - .startObject().field("name", "tony stark").endObject() - .endArray().endObject(); - return doc; - } - - XContentBuilder getNotMatchingNestedDoc() throws IOException { - XContentBuilder doc = XContentFactory.jsonBuilder(); - doc.startObject().field("companyname", "notstark").startArray("employee") - .startObject().field("name", "virginia stark").endObject() - .startObject().field("name", "tony potts").endObject() - .endArray().endObject(); - return doc; - } - - public void testNestedDocFilter() throws IOException { - String mapping = "{\n" + - " \"doc\": {\n" + - " \"properties\": {\n" + - " \"name\": {\"type\":\"text\"},\n" + - " \"persons\": {\n" + - " \"type\": \"nested\"\n," + - " \"properties\" : {\"foo\" : {\"type\" : \"text\"}}" + - " }\n" + - " }\n" + - " }\n" + - " }"; - String doc = "{\n" + - " \"name\": 
\"obama\",\n" + - " \"persons\": [\n" + - " {\n" + - " \"foo\": \"bar\"\n" + - " }\n" + - " ]\n" + - " }"; - String q1 = "{\n" + - " \"query\": {\n" + - " \"bool\": {\n" + - " \"must\": {\n" + - " \"match\": {\n" + - " \"name\": \"obama\"\n" + - " }\n" + - " }\n" + - " }\n" + - " },\n" + - "\"text\":\"foo\""+ - "}"; - String q2 = "{\n" + - " \"query\": {\n" + - " \"bool\": {\n" + - " \"must_not\": {\n" + - " \"match\": {\n" + - " \"name\": \"obama\"\n" + - " }\n" + - " }\n" + - " }\n" + - " },\n" + - "\"text\":\"foo\""+ - "}"; - String q3 = "{\n" + - " \"query\": {\n" + - " \"bool\": {\n" + - " \"must\": {\n" + - " \"match\": {\n" + - " \"persons.foo\": \"bar\"\n" + - " }\n" + - " }\n" + - " }\n" + - " },\n" + - "\"text\":\"foo\""+ - "}"; - String q4 = "{\n" + - " \"query\": {\n" + - " \"bool\": {\n" + - " \"must_not\": {\n" + - " \"match\": {\n" + - " \"persons.foo\": \"bar\"\n" + - " }\n" + - " }\n" + - " }\n" + - " },\n" + - "\"text\":\"foo\""+ - "}"; - String q5 = "{\n" + - " \"query\": {\n" + - " \"bool\": {\n" + - " \"must\": {\n" + - " \"nested\": {\n" + - " \"path\": \"persons\",\n" + - " \"query\": {\n" + - " \"match\": {\n" + - " \"persons.foo\": \"bar\"\n" + - " }\n" + - " }\n" + - " }\n" + - " }\n" + - " }\n" + - " },\n" + - "\"text\":\"foo\""+ - "}"; - String q6 = "{\n" + - " \"query\": {\n" + - " \"bool\": {\n" + - " \"must_not\": {\n" + - " \"nested\": {\n" + - " \"path\": \"persons\",\n" + - " \"query\": {\n" + - " \"match\": {\n" + - " \"persons.foo\": \"bar\"\n" + - " }\n" + - " }\n" + - " }\n" + - " }\n" + - " }\n" + - " },\n" + - "\"text\":\"foo\""+ - "}"; - assertAcked(client().admin().indices().prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("doc", mapping)); - ensureGreen(INDEX_NAME); - client().prepareIndex(INDEX_NAME, TYPE_NAME).setSource(q1).setId("q1").get(); - client().prepareIndex(INDEX_NAME, TYPE_NAME).setSource(q2).setId("q2").get(); - client().prepareIndex(INDEX_NAME, TYPE_NAME).setSource(q3).setId("q3").get(); - client().prepareIndex(INDEX_NAME, TYPE_NAME).setSource(q4).setId("q4").get(); - client().prepareIndex(INDEX_NAME, TYPE_NAME).setSource(q5).setId("q5").get(); - client().prepareIndex(INDEX_NAME, TYPE_NAME).setSource(q6).setId("q6").get(); - refresh(); - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("doc") - .setPercolateDoc(docBuilder().setDoc(doc)) - .get(); - assertMatchCount(response, 3L); - Set expectedIds = new HashSet<>(); - expectedIds.add("q1"); - expectedIds.add("q4"); - expectedIds.add("q5"); - for (PercolateResponse.Match match : response.getMatches()) { - assertTrue(expectedIds.remove(match.getId().string())); - } - assertTrue(expectedIds.isEmpty()); - response = preparePercolate(client()).setOnlyCount(true) - .setIndices(INDEX_NAME).setDocumentType("doc") - .setPercolateDoc(docBuilder().setDoc(doc)) - .get(); - assertMatchCount(response, 3L); - response = preparePercolate(client()).setScore(randomBoolean()).setSortByScore(randomBoolean()).setOnlyCount(randomBoolean()).setPercolateQuery(QueryBuilders.termQuery("text", "foo")) - .setIndices(INDEX_NAME).setDocumentType("doc") - .setPercolateDoc(docBuilder().setDoc(doc)) - .get(); - assertMatchCount(response, 3L); - } - - public void testMapUnmappedFieldAsString() throws IOException{ - // If index.percolator.map_unmapped_fields_as_string is set to true, unmapped field is mapped as an analyzed string. 
- Settings.Builder settings = Settings.builder() - .put(indexSettings()) - .put("index.percolator.map_unmapped_fields_as_string", true); - assertAcked(prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .setSettings(settings)); - client().prepareIndex(INDEX_NAME, TYPE_NAME) - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject()).get(); - refresh(); - logger.info("--> Percolate doc with field1=value"); - PercolateResponse response1 = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value").endObject())) - .execute().actionGet(); - assertMatchCount(response1, 1L); - assertThat(response1.getMatches(), arrayWithSize(1)); - } - - public void testGeoShapeWithMapUnmappedFieldAsString() throws Exception { - // If index.percolator.map_unmapped_fields_as_string is set to true, unmapped field is mapped as an analyzed string. - Settings.Builder settings = Settings.builder() - .put(indexSettings()) - .put("index.percolator.map_unmapped_fields_as_string", true); - assertAcked(prepareCreate(INDEX_NAME) - .setSettings(settings) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("type", "location", "type=geo_shape")); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", geoShapeQuery("location", ShapeBuilders.newEnvelope(new Coordinate(0d, 50d), new Coordinate(2d, 40d)))).endObject()) - .get(); - refresh(); - - PercolateResponse response1 = preparePercolate(client()) - .setIndices(INDEX_NAME).setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject() - .startObject("location") - .field("type", "point") - .field("coordinates", Arrays.asList(1.44207d, 43.59959d)) - .endObject() - .endObject())) - .execute().actionGet(); - assertMatchCount(response1, 1L); - assertThat(response1.getMatches().length, equalTo(1)); - assertThat(response1.getMatches()[0].getId().string(), equalTo("1")); - } - - public void testFailParentChild() throws Exception { - assertAcked(prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("child", "_parent", "type=parent").addMapping("parent")); - Exception e = expectThrows(MapperParsingException.class, () -> client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", hasChildQuery("child", matchAllQuery(), ScoreMode.None)).endObject()) - .get()); - assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); - assertThat(e.getCause().getMessage(), equalTo("the [has_child] query is unsupported inside a percolator query")); - } - - public void testPercolateDocumentWithParentField() throws Exception { - assertAcked(prepareCreate(INDEX_NAME) - .addMapping(TYPE_NAME, "query", "type=percolator") - .addMapping("child", "_parent", "type=parent").addMapping("parent")); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute().actionGet(); - refresh(); - - // Just percolating a document that has a _parent field in its mapping should just work: - PercolateResponse response = preparePercolate(client()) - .setDocumentType("parent") - .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("field", "value")) - .get(); - assertMatchCount(response, 1); - assertThat(response.getMatches()[0].getId().string(), equalTo("1")); - } - - 
public void testFilterByNow() throws Exception { - prepareCreate(INDEX_NAME).addMapping(TYPE_NAME, "query", "type=percolator").get(); - client().prepareIndex(INDEX_NAME, TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("created", "2015-07-10T14:41:54+0000").endObject()) - .get(); - refresh(); - - PercolateResponse response = preparePercolate(client()) - .setIndices(INDEX_NAME) - .setDocumentType("type") - .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) - .setPercolateQuery(rangeQuery("created").lte("now")) - .get(); - assertMatchCount(response, 1); - } -} - diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index 665b9926a58..1cce5fdbbdb 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -19,6 +19,8 @@ package org.elasticsearch.percolator; import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; @@ -48,6 +50,8 @@ import java.util.Map; import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -58,16 +62,13 @@ import static org.elasticsearch.index.query.QueryBuilders.spanNearQuery; import static org.elasticsearch.index.query.QueryBuilders.spanNotQuery; import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.percolator.PercolateSourceBuilder.docBuilder; -import static org.elasticsearch.percolator.PercolatorTestUtil.assertMatchCount; -import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray; -import static org.elasticsearch.percolator.PercolatorTestUtil.preparePercolate; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.arrayWithSize; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.IsNull.notNullValue; public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { @@ -87,19 +88,16 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { public void testPercolateScriptQuery() throws IOException { client().admin().indices().prepareCreate("index").addMapping("type", "query", "type=percolator").get(); - ensureGreen(); client().prepareIndex("index", "type", "1") 
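Note: the tests removed above exercised the old `preparePercolate` request/response API. The rewritten tests in the next file express percolation as an ordinary search with a `percolate` query instead; a minimal sketch of that replacement pattern, with index, type, and field names borrowed from the tests (highlighting, where needed, rides on the regular search highlighter rather than a dedicated percolate option):

-------------------------------------------------
// Sketch only: percolate an inline document via the percolate query.
// Assumes an index whose "query" field is mapped as type=percolator,
// as in the tests in this patch.
SearchResponse response = client().prepareSearch(INDEX_NAME)
        .setQuery(new PercolateQueryBuilder("query", "type",
                jsonBuilder().startObject()
                        .field("field1", "The quick brown fox jumps over the lazy dog")
                        .endObject().bytes()))
        .get();
assertHitCount(response, 5); // illustrative: the five registered queries match
-------------------------------------------------
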
.setSource(jsonBuilder().startObject().field("query", QueryBuilders.scriptQuery( new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1==1", Collections.emptyMap()))).endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .execute().actionGet(); - PercolateResponse response = preparePercolate(client()) - .setIndices("index").setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject())) - .execute().actionGet(); - assertMatchCount(response, 1L); - assertThat(response.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(response.getMatches(), "index"), arrayContainingInAnyOrder("1")); + SearchResponse response = client().prepareSearch("index") + .setQuery(new PercolateQueryBuilder("query", "type", jsonBuilder().startObject().field("field1", "b").endObject().bytes())) + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "1"); } public void testPercolatorQuery() throws Exception { @@ -605,4 +603,82 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { assertHitCount(response, 0); } + public void testPercolatorQueryViaMultiSearch() throws Exception { + createIndex("test", client().admin().indices().prepareCreate("test") + .addMapping("type", "field1", "type=text") + .addMapping("queries", "query", "type=percolator") + ); + + client().prepareIndex("test", "queries", "1") + .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) + .execute().actionGet(); + client().prepareIndex("test", "queries", "2") + .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) + .execute().actionGet(); + client().prepareIndex("test", "queries", "3") + .setSource(jsonBuilder().startObject().field("query", boolQuery() + .must(matchQuery("field1", "b")) + .must(matchQuery("field1", "c")) + ).endObject()) + .execute().actionGet(); + client().prepareIndex("test", "queries", "4") + .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) + .execute().actionGet(); + client().prepareIndex("test", "type", "1") + .setSource(jsonBuilder().startObject().field("field1", "c").endObject()) + .execute().actionGet(); + client().admin().indices().prepareRefresh().get(); + + MultiSearchResponse response = client().prepareMultiSearch() + .add(client().prepareSearch("test") + .setQuery(new PercolateQueryBuilder("query", "type", + jsonBuilder().startObject().field("field1", "b").endObject().bytes()))) + .add(client().prepareSearch("test") + .setQuery(new PercolateQueryBuilder("query", "type", + yamlBuilder().startObject().field("field1", "c").endObject().bytes()))) + .add(client().prepareSearch("test") + .setQuery(new PercolateQueryBuilder("query", "type", + smileBuilder().startObject().field("field1", "b c").endObject().bytes()))) + .add(client().prepareSearch("test") + .setQuery(new PercolateQueryBuilder("query", "type", + jsonBuilder().startObject().field("field1", "d").endObject().bytes()))) + .add(client().prepareSearch("test") + .setQuery(new PercolateQueryBuilder("query", "type", "test", "type", "1", null, null, null))) + .add(client().prepareSearch("test") // non existing doc, so error element + .setQuery(new PercolateQueryBuilder("query", "type", "test", "type", "2", null, null, null))) + .get(); + + MultiSearchResponse.Item item = response.getResponses()[0]; + assertHitCount(item.getResponse(), 2L); + assertSearchHits(item.getResponse(), "1", "4"); + 
assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[1]; + assertHitCount(item.getResponse(), 2L); + assertSearchHits(item.getResponse(), "2", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[2]; + assertHitCount(item.getResponse(), 4L); + assertSearchHits(item.getResponse(), "1", "2", "3", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[3]; + assertHitCount(item.getResponse(), 1L); + assertSearchHits(item.getResponse(), "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[4]; + assertHitCount(item.getResponse(), 2L); + assertSearchHits(item.getResponse(), "2", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[5]; + assertThat(item.getResponse(), nullValue()); + assertThat(item.getFailureMessage(), notNullValue()); + assertThat(item.getFailureMessage(), equalTo("all shards failed")); + assertThat(ExceptionsHelper.unwrapCause(item.getFailure().getCause()).getMessage(), + containsString("[test/type/2] couldn't be found")); + } + } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorTestUtil.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorTestUtil.java deleted file mode 100644 index 456c2508d8e..00000000000 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorTestUtil.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.percolator; - -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.Strings; -import org.junit.Assert; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertVersionSerializable; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.formatShardStatus; - -/** Static method pulled out of PercolatorIT, used by other tests */ -public class PercolatorTestUtil extends Assert { - - public static PercolateRequestBuilder preparePercolate(ElasticsearchClient client) { - return new PercolateRequestBuilder(client, PercolateAction.INSTANCE); - } - - public static MultiPercolateRequestBuilder prepareMultiPercolate(ElasticsearchClient client) { - return new MultiPercolateRequestBuilder(client, MultiPercolateAction.INSTANCE); - } - - public static void assertMatchCount(PercolateResponse percolateResponse, long expectedHitCount) { - if (percolateResponse.getCount() != expectedHitCount) { - fail("Count is " + percolateResponse.getCount() + " but " + expectedHitCount + " was expected. 
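Note: item 4 of the multi-search test above shows the percolate query referencing an already-indexed document instead of an inline source. A hedged sketch of that form, pulled out of the test; by assumption the three trailing nulls are the indexed document's routing, preference, and version:

-------------------------------------------------
// Sketch: percolate the stored document test/type/1 rather than sending
// its source inline. Trailing nulls assumed to be routing/preference/version.
SearchResponse response = client().prepareSearch("test")
        .setQuery(new PercolateQueryBuilder("query", "type",
                "test", "type", "1", null, null, null))
        .get();
assertSearchHits(response, "2", "4"); // matches msearch item 4 above
-------------------------------------------------
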
" + - formatShardStatus(percolateResponse)); - } - assertVersionSerializable(percolateResponse); - } - - public static String[] convertFromTextArray(PercolateResponse.Match[] matches, String index) { - if (matches.length == 0) { - return Strings.EMPTY_ARRAY; - } - String[] strings = new String[matches.length]; - for (int i = 0; i < matches.length; i++) { - assertEquals(index, matches[i].getIndex().string()); - strings[i] = matches[i].getId().string(); - } - return strings; - } - -} diff --git a/modules/percolator/src/test/resources/org/elasticsearch/percolator/mpercolate1.json b/modules/percolator/src/test/resources/org/elasticsearch/percolator/mpercolate1.json deleted file mode 100644 index 44079390bfc..00000000000 --- a/modules/percolator/src/test/resources/org/elasticsearch/percolator/mpercolate1.json +++ /dev/null @@ -1,16 +0,0 @@ -{"percolate" : {"index" : "my-index1", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : false}} -{"doc" : {"field1" : "value1"}} -{"percolate" : {"indices" : ["my-index2", "my-index3"], "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : true}} -{"doc" : {"field1" : "value2"}} -{"count" : {"indices" : ["my-index4", "my-index5"], "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "expand_wildcards" : "open,closed"}} -{"doc" : {"field1" : "value3"}} -{"percolate" : {"id" : "1", "index" : "my-index6", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "expand_wildcards" : ["open", "closed"]}} -{} -{"count" : {"id" : "2", "index" : "my-index7", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local"}} -{} -{"percolate" : {"index" : "my-index8", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "primary"}} -{"doc" : {"field1" : "value4"}} -{"percolate" : {"id" : "3", "index" : "my-index9", "type" : "my-type1", "percolate_index": "percolate-index1", "percolate_type": "other-type", "percolate_preference": "_local", "percolate_routing": "percolate-routing-1"}} -{} -{"percolate" : {"id" : "4", "index" : "my-index10", "type" : "my-type1", "allow_no_indices": false, "expand_wildcards" : ["open"]}} -{} diff --git a/modules/percolator/src/test/resources/org/elasticsearch/percolator/mpercolate2.json b/modules/percolator/src/test/resources/org/elasticsearch/percolator/mpercolate2.json deleted file mode 100644 index fa676cf6182..00000000000 --- a/modules/percolator/src/test/resources/org/elasticsearch/percolator/mpercolate2.json +++ /dev/null @@ -1,6 +0,0 @@ -{"percolate" : {"routing" : "my-routing-1", "preference" : "_local"}} -{"doc" : {"field1" : "value1"}} -{"percolate" : {"index" : "my-index1", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : true}} -{"doc" : {"field1" : "value2"}} -{"percolate" : {}} -{"doc" : {"field1" : "value3"}} diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yaml b/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yaml new file mode 100644 index 00000000000..d8f2b3264e8 --- /dev/null +++ b/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yaml @@ -0,0 +1,50 @@ +--- +"Test percolator basics via rest": + - do: + indices.create: + index: queries_index + body: + mappings: + queries: + properties: + query: + type: percolator + test_type: + properties: + foo: + type: keyword + + - do: + index: + index: queries_index + type: queries + id: test_percolator + body: + query: + 
diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yaml b/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yaml
new file mode 100644
index 00000000000..d8f2b3264e8
--- /dev/null
+++ b/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yaml
@@ -0,0 +1,50 @@
+---
+"Test percolator basics via rest":
+  - do:
+      indices.create:
+        index: queries_index
+        body:
+          mappings:
+            queries:
+              properties:
+                query:
+                  type: percolator
+            test_type:
+              properties:
+                foo:
+                  type: keyword
+
+  - do:
+      index:
+        index: queries_index
+        type: queries
+        id: test_percolator
+        body:
+          query:
+            match_all: {}
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        body:
+          - query:
+              percolate:
+                document_type: test_type
+                field: query
+                document:
+                  foo: bar
+  - match: { hits.total: 1 }
+
+  - do:
+      msearch:
+        body:
+          - index: queries_index
+          - query:
+              percolate:
+                document_type: test_type
+                field: query
+                document:
+                  foo: bar
+  - match: { responses.0.hits.total: 1 }
diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/mpercolate/10_basic.yaml b/modules/percolator/src/test/resources/rest-api-spec/test/mpercolate/10_basic.yaml
deleted file mode 100644
index ab75d6abe29..00000000000
--- a/modules/percolator/src/test/resources/rest-api-spec/test/mpercolate/10_basic.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-"Basic multi-percolate":
-
-  - do:
-      indices.create:
-        index: percolator_index
-        body:
-          mappings:
-            queries:
-              properties:
-                query:
-                  type: percolator
-
-  - do:
-      index:
-        index: percolator_index
-        type: my_type
-        id: 1
-        body: {foo: bar}
-
-  - do:
-      index:
-        index: percolator_index
-        type: queries
-        id: test_percolator
-        body:
-          query:
-            match_all: {}
-
-  - do:
-      indices.refresh: {}
-
-  - do:
-      mpercolate:
-        body:
-          - percolate:
-              index: percolator_index
-              type: my_type
-          - doc:
-              foo: bar
-          - percolate:
-              index: percolator_index1
-              type: my_type
-          - doc:
-              foo: bar
-          - percolate:
-              index: percolator_index
-              type: my_type
-              id: 1
-          - {}
-
-  - match: { responses.0.total: 1 }
-  - match: { responses.1.error.root_cause.0.type: index_not_found_exception }
-  - match: { responses.1.error.root_cause.0.reason: "/no.such.index/" }
-  - match: { responses.1.error.root_cause.0.index: percolator_index1 }
-  - match: { responses.2.total: 1 }
diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/percolate/15_new.yaml b/modules/percolator/src/test/resources/rest-api-spec/test/percolate/15_new.yaml
deleted file mode 100644
index 45532d95757..00000000000
--- a/modules/percolator/src/test/resources/rest-api-spec/test/percolate/15_new.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-"Basic percolation tests":
-
-  - do:
-      indices.create:
-        index: test_index
-        body:
-          mappings:
-            queries:
-              properties:
-                query:
-                  type: percolator
-
-  - do:
-      index:
-        index: test_index
-        type: queries
-        id: test_percolator
-        body:
-          query:
-            match_all: {}
-
-  - do:
-      indices.refresh: {}
-
-  - do:
-      percolate:
-        index: test_index
-        type: test_type
-        body:
-          doc:
-            foo: bar
-
-  - match: {'total': 1}
-  - match: {'matches': [{_index: test_index, _id: test_percolator}]}
-
-  - do:
-      count_percolate:
-        index: test_index
-        type: test_type
-        body:
-          doc:
-            foo: bar
-
-  - is_false: matches
-  - match: {'total': 1}
diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/percolate/16_existing_doc.yaml b/modules/percolator/src/test/resources/rest-api-spec/test/percolate/16_existing_doc.yaml
deleted file mode 100644
index 2d15e2cd222..00000000000
--- a/modules/percolator/src/test/resources/rest-api-spec/test/percolate/16_existing_doc.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-"Percolate existing documents":
-
-  - do:
-      indices.create:
-        index: percolator_index
-        body:
-          mappings:
-            queries:
-              properties:
-                query:
-                  type: percolator
-
-  - do:
-      index:
-        index: percolator_index
-        type: queries
-        id: test_percolator
-        body:
-          query:
-            match_all: {}
-          tag: tag1
-
-  - do:
-      index:
-        index: percolator_index
-        type: test_type
-        id: 1
-        body:
-          foo: bar
-
-  - do:
-      indices.create:
-        index: my_index
-
-  - do:
-      index:
-        index: my_index
-        type: my_type
-        id: 1
-        body:
-          foo: bar
-
-  - do:
-      indices.refresh: {}
-
-  - do:
-      percolate:
-        index: percolator_index
-        type: test_type
-        id: 1
-
-  - match: {'matches': [{_index: percolator_index, _id: test_percolator}]}
-
-  - do:
-      percolate:
-        index: my_index
-        type: my_type
-        id: 1
-        percolate_index: percolator_index
-        percolate_type: test_type
-
-  - match: {'matches': [{_index: percolator_index, _id: test_percolator}]}
-
-
-  - do:
-      index:
-        index: my_index
-        type: my_type
-        id: 1
-        body:
-          foo: bar
-
-  - do:
-      percolate:
-        index: my_index
-        type: my_type
-        id: 1
-        version: 2
-        percolate_index: percolator_index
-        percolate_type: test_type
-
-  - match: {'matches': [{_index: percolator_index, _id: test_percolator}]}
-
-  - do:
-      catch: conflict
-      percolate:
-        index: my_index
-        type: my_type
-        id: 1
-        version: 1
-        percolate_index: percolator_index
-        percolate_type: test_type
-
-  - do:
-      percolate:
-        index: percolator_index
-        type: test_type
-        id: 1
-        body:
-          filter:
-            term:
-              tag: non_existing_tag
-
-  - match: {'matches': []}
-
-  - do:
-      percolate:
-        index: percolator_index
-        type: test_type
-        id: 1
-        body:
-          filter:
-            term:
-              tag: tag1
-
-  - match: {'matches': [{_index: percolator_index, _id: test_percolator, _score: 0.2876821}]}
-
diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/percolate/17_empty.yaml b/modules/percolator/src/test/resources/rest-api-spec/test/percolate/17_empty.yaml
deleted file mode 100644
index a6a56bb4636..00000000000
--- a/modules/percolator/src/test/resources/rest-api-spec/test/percolate/17_empty.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-"Basic percolation tests on an empty cluster":
-
-  - do:
-      indices.create:
-        index: test_index
-        body:
-          mappings:
-            queries:
-              properties:
-                query:
-                  type: percolator
-
-  - do:
-      indices.refresh: {}
-
-  - do:
-      percolate:
-        index: test_index
-        type: test_type
-        body:
-          doc:
-            foo: bar
-
-  - match: {'total': 0}
-  - match: {'matches': []}
-
-  - do:
-      count_percolate:
-        index: test_index
-        type: test_type
-        body:
-          doc:
-            foo: bar
-
-  - is_false: matches
-  - match: {'total': 0}
diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/percolate/18_highligh_with_query.yaml b/modules/percolator/src/test/resources/rest-api-spec/test/percolate/18_highligh_with_query.yaml
deleted file mode 100644
index 8f3287ddd02..00000000000
--- a/modules/percolator/src/test/resources/rest-api-spec/test/percolate/18_highligh_with_query.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-"Basic percolation highlight query test":
-
-  - do:
-      indices.create:
-        index: test_index
-        body:
-          mappings:
-            type_1:
-              properties:
-                foo:
-                  type: text
-            queries:
-              properties:
-                query:
-                  type: percolator
-
-  - do:
-      index:
-        index: test_index
-        type: queries
-        id: test_percolator
-        body:
-          query:
-            match:
-              foo: bar
-
-  - do:
-      indices.refresh: {}
-
-  - do:
-      percolate:
-        index: test_index
-        type: type_1
-        body:
-          doc:
-            foo: "bar foo"
-          size: 1
-          highlight:
-            fields:
-              foo:
-                highlight_query:
-                  match:
-                    foo: foo
-
-  - match: {'total': 1}
diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/percolate/19_nested.yaml b/modules/percolator/src/test/resources/rest-api-spec/test/percolate/19_nested.yaml
deleted file mode 100644
index eff66846da6..00000000000
--- a/modules/percolator/src/test/resources/rest-api-spec/test/percolate/19_nested.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-setup:
-  - do:
-      indices.create:
-        index: nestedindex
-        body:
-          mappings:
-            company:
-              properties:
-                companyname:
-                  type: text
-                employee:
-                  type: nested
-                  properties:
-                    name:
-                      type: text
-            queries:
-              properties:
-                query:
-                  type: percolator
-
-
-  - do:
-      indices.refresh: {}
-  - do:
-      index:
-        index: nestedindex
-        type: "queries"
-        id: query
-        body: { "query": { "nested": { "path": "employee", "score_mode": "avg", "query": { "match": { "employee.name": { "query": "virginia potts", "operator": "and"} } } } } }
-  - do:
-      indices.refresh: {}
-
-
----
-"Basic percolation tests on nested doc":
-  - do:
-      percolate:
-        index: nestedindex
-        type: company
-        body: { "doc": { "companyname": "stark", "employee": [ { "name": "virginia stark"}, { "name": "tony potts"} ] } }
-
-  - match: {'total': 0}
-
-  - do:
-      percolate:
-        index: nestedindex
-        type: company
-        body: { "doc": { "companyname": "stark", "employee": [ { "name": "virginia potts"}, { "name": "tony stark"} ] } }
-
-  - match: {'total': 1}
-
----
-"Percolate existing docs":
-  - do:
-      index:
-        index: nestedindex
-        type: company
-        id: notmatching
-        body: { "companyname": "stark", "employee": [ { "name": "virginia stark"}, { "name": "tony potts"} ] }
-
-
-  - do:
-      index:
-        index: nestedindex
-        type: company
-        id: matching
-        body: { "companyname": "stark", "employee": [ { "name": "virginia potts"}, { "name": "tony stark"} ] }
-
-  - do:
-      indices.refresh: {}
-
-  - do:
-      percolate:
-        index: nestedindex
-        type: company
-        id: notmatching
-
-  - match: {'total': 0}
-
-  - do:
-      percolate:
-        index: nestedindex
-        type: company
-        id: matching
-
-  - match: {'total': 1}
-
----
-"Test multi percolate":
-
-
-  - do:
-      mpercolate:
-        body:
-          - percolate: {"index": "nestedindex", "type": "company"}
-          - doc: { "companyname": "stark", "employee": [ { "name": "virginia stark"}, { "name": "tony potts"} ] }
-          - percolate: {"index": "nestedindex", "type": "company"}
-          - doc: { "companyname": "stark", "employee": [ { "name": "virginia potts"}, { "name": "tony stark"} ] }
-
-  - match: {'responses.0.total': 0}
-  - match: {'responses.1.total': 1}
-
-  - do:
-      mpercolate:
-        body:
-          - percolate: {"index": "nestedindex", "type": "company"}
-          - doc: { "companyname": "stark", "employee": [ { "name": "virginia potts"}, {"name": "tony stark"} ] }
-          - percolate: {"index": "nestedindex", "type": "company"}
-          - doc: { "companyname": "stark", "employee": [ { "name": "virginia stark"}, { "name": "tony potts"} ] }
-
-
-  - match: {'responses.0.total': 1}
-  - match: {'responses.1.total': 0}
-
-
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java
index 10cb31fc992..cbc6009b51c 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java
@@ -23,14 +23,12 @@ import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.GenericAction;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.search.SearchRequestParsers;
 import org.elasticsearch.tasks.LoggingTaskListener;
 import org.elasticsearch.tasks.Task;
 
@@ -43,15 +41,10 @@ public abstract class AbstractBaseReindexRestHandler<
             Request extends AbstractBulkByScrollRequest<Request>,
             A extends GenericAction<Request, BulkIndexByScrollResponse>
         > extends BaseRestHandler {
 
-    protected final SearchRequestParsers searchRequestParsers;
-    private final ClusterService clusterService;
     private final A action;
 
-    protected AbstractBaseReindexRestHandler(Settings settings, SearchRequestParsers searchRequestParsers, ClusterService clusterService,
-            A action) {
+    protected AbstractBaseReindexRestHandler(Settings settings, A action) {
         super(settings);
-        this.searchRequestParsers = searchRequestParsers;
-        this.clusterService = clusterService;
         this.action = action;
     }
 
@@ -80,7 +73,7 @@ public abstract class AbstractBaseReindexRestHandler<
         if (validationException != null) {
             throw validationException;
         }
-        return sendTask(client.executeLocally(action, internal, LoggingTaskListener.instance()));
+        return sendTask(client.getLocalNodeId(), client.executeLocally(action, internal, LoggingTaskListener.instance()));
     }
 
     /**
@@ -111,11 +104,11 @@ public abstract class AbstractBaseReindexRestHandler<
         return request;
     }
 
-    private RestChannelConsumer sendTask(Task task) throws IOException {
+    private RestChannelConsumer sendTask(String localNodeId, Task task) throws IOException {
         return channel -> {
             try (XContentBuilder builder = channel.newBuilder()) {
                 builder.startObject();
-                builder.field("task", clusterService.localNode().getId() + ":" + task.getId());
+                builder.field("task", localNodeId + ":" + task.getId());
                 builder.endObject();
                 channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
             }
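Note: with ClusterService gone from the handler, the task id returned to the client is derived from the NodeClient alone. An illustrative helper, not part of the patch, showing the response's task id format:

-------------------------------------------------
// Hypothetical helper: the "task" field pairs the local node id (now
// obtained from NodeClient) with the task's numeric id, e.g. "nodeA:12345".
private static String taskId(NodeClient client, Task task) {
    return client.getLocalNodeId() + ":" + task.getId();
}
-------------------------------------------------
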
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java
index 7e208986aa7..1b6b8c74a78 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java
@@ -22,14 +22,12 @@ package org.elasticsearch.index.reindex;
 
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.action.GenericAction;
 import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.search.RestSearchAction;
-import org.elasticsearch.search.SearchRequestParsers;
 
 import java.io.IOException;
 import java.util.Map;
@@ -44,9 +42,8 @@ public abstract class AbstractBulkByQueryRestHandler<
         Request extends AbstractBulkByScrollRequest<Request>,
         A extends GenericAction<Request, BulkIndexByScrollResponse>> extends AbstractBaseReindexRestHandler<Request, A> {
 
-    protected AbstractBulkByQueryRestHandler(Settings settings, SearchRequestParsers searchRequestParsers, ClusterService clusterService,
-            A action) {
-        super(settings, searchRequestParsers, clusterService, action);
+    protected AbstractBulkByQueryRestHandler(Settings settings, A action) {
+        super(settings, action);
     }
 
     protected void parseInternalRequest(Request internal, RestRequest restRequest,
@@ -65,8 +62,7 @@ public abstract class AbstractBulkByQueryRestHandler<
          * the generated parser probably is a noop but we should do the accounting just in case. It doesn't hurt to close twice but it
          * really hurts not to close if by some miracle we have to.
          */
        try {
-            RestSearchAction.parseSearchRequest(searchRequest, restRequest, searchRequestParsers, parseFieldMatcher,
-                    searchRequestParser);
+            RestSearchAction.parseSearchRequest(searchRequest, restRequest, parseFieldMatcher, searchRequestParser);
        } finally {
            IOUtils.close(searchRequestParser);
        }
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java
index 7b6be85140f..8d3583a077d 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java
@@ -88,11 +88,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource {
     }
 
     @Override
-    public void clearScroll(String scrollId) {
-        /*
-         * Fire off the clear scroll but don't wait for it it return before
-         * we send the use their response.
-         */
+    public void clearScroll(String scrollId, Runnable onCompletion) {
         ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
         clearScrollRequest.addScrollId(scrollId);
         /*
@@ -103,15 +99,22 @@ public class ClientScrollableHitSource extends ScrollableHitSource {
             @Override
             public void onResponse(ClearScrollResponse response) {
                 logger.debug("Freed [{}] contexts", response.getNumFreed());
+                onCompletion.run();
             }
 
             @Override
             public void onFailure(Exception e) {
                 logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e);
+                onCompletion.run();
             }
         });
     }
 
+    @Override
+    protected void cleanup() {
+        // Nothing to do
+    }
+
     /**
      * Run a search action and call onResponse when the response comes in, retrying if the action fails with an exception caused by
      * rejected execution.
@@ -182,7 +185,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource {
         } else {
             failures = new ArrayList<>(response.getShardFailures().length);
             for (ShardSearchFailure failure: response.getShardFailures()) {
-                String nodeId = failure.shard() == null ? null : failure.shard().nodeId();
+                String nodeId = failure.shard() == null ? null : failure.shard().getNodeId();
                 failures.add(new SearchFailure(failure.getCause(), failure.index(), failure.shardId(), nodeId));
             }
         }
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java
index 618db3dfa48..2f684fe96ea 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java
@@ -71,6 +71,9 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequest>
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java
     @Inject
-    public RestDeleteByQueryAction(Settings settings, RestController controller, SearchRequestParsers searchRequestParsers,
-            ClusterService clusterService) {
-        super(settings, searchRequestParsers, clusterService, DeleteByQueryAction.INSTANCE);
+    public RestDeleteByQueryAction(Settings settings, RestController controller) {
+        super(settings, DeleteByQueryAction.INSTANCE);
         controller.registerHandler(POST, "/{index}/_delete_by_query", this);
         controller.registerHandler(POST, "/{index}/{type}/_delete_by_query", this);
     }
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java
index c631a4c7a1b..51477b76032 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java
@@ -22,10 +22,8 @@ package org.elasticsearch.index.reindex;
 
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.ParseFieldMatcherSupplier;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.inject.Inject;
@@ -44,7 +42,6 @@ import org.elasticsearch.index.reindex.remote.RemoteInfo;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.script.Script;
-import org.elasticsearch.search.SearchRequestParsers;
 
 import java.io.IOException;
 import java.util.List;
@@ -62,11 +59,11 @@ import static org.elasticsearch.rest.RestRequest.Method.POST;
  * Expose reindex over rest.
  */
 public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexRequest, ReindexAction> {
-    static final ObjectParser<ReindexRequest, ReindexParseContext> PARSER = new ObjectParser<>("reindex");
+    static final ObjectParser<ReindexRequest, Void> PARSER = new ObjectParser<>("reindex");
     private static final Pattern HOST_PATTERN = Pattern.compile("(?<scheme>[^:]+)://(?<host>[^:]+):(?<port>\\d+)");
 
     static {
-        ObjectParser.Parser<SearchRequest, ReindexParseContext> sourceParser = (parser, request, context) -> {
+        ObjectParser.Parser<SearchRequest, Void> sourceParser = (parser, request, context) -> {
             // Funky hack to work around Search not having a proper ObjectParser and us wanting to extract query if using remote.
             Map<String, Object> source = parser.map();
             String[] indices = extractStringArray(source, "index");
@@ -81,11 +78,11 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexRequest, ReindexAction>
-        ObjectParser<IndexRequest, ReindexParseContext> destParser = new ObjectParser<>("dest");
+        ObjectParser<IndexRequest, Void> destParser = new ObjectParser<>("dest");
         destParser.declareString(IndexRequest::index, new ParseField("index"));
         destParser.declareString(IndexRequest::type, new ParseField("type"));
         destParser.declareString(IndexRequest::routing, new ParseField("routing"));
@@ -102,9 +99,8 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexRequest, ReindexAction>
         Map<String, Object> map = (Map<String, Object>) query;
         return builder.map(map).bytes();
     }
-
-    static class ReindexParseContext implements ParseFieldMatcherSupplier {
-        private final SearchRequestParsers searchRequestParsers;
-        private final ParseFieldMatcher parseFieldMatcher;
-
-        ReindexParseContext(SearchRequestParsers searchRequestParsers, ParseFieldMatcher parseFieldMatcher) {
-            this.searchRequestParsers = searchRequestParsers;
-            this.parseFieldMatcher = parseFieldMatcher;
-        }
-
-        QueryParseContext queryParseContext(XContentParser parser) {
-            return new QueryParseContext(parser, parseFieldMatcher);
-        }
-
-        @Override
-        public ParseFieldMatcher getParseFieldMatcher() {
-            return this.parseFieldMatcher;
-        }
-    }
 }
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java
index f21083e4ef3..3c0f5d2772d 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java
@@ -22,7 +22,6 @@ package org.elasticsearch.index.reindex;
 
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
@@ -30,7 +29,6 @@ import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptType;
-import org.elasticsearch.search.SearchRequestParsers;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -45,9 +43,8 @@ import static org.elasticsearch.script.Script.DEFAULT_SCRIPT_LANG;
 
 public class RestUpdateByQueryAction extends AbstractBulkByQueryRestHandler<UpdateByQueryRequest, UpdateByQueryAction> {
 
     @Inject
-    public RestUpdateByQueryAction(Settings settings, RestController controller, SearchRequestParsers searchRequestParsers,
-            ClusterService clusterService) {
-        super(settings, searchRequestParsers, clusterService, UpdateByQueryAction.INSTANCE);
+    public RestUpdateByQueryAction(Settings settings, RestController controller) {
+        super(settings, UpdateByQueryAction.INSTANCE);
         controller.registerHandler(POST, "/{index}/_update_by_query", this);
         controller.registerHandler(POST, "/{index}/{type}/_update_by_query", this);
     }
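Note: dropping ReindexParseContext leaves the reindex parsers with no per-request state, so the ObjectParser context type collapses to Void and callers simply pass null, as the updated RestReindexActionTests does. A minimal sketch of the pattern; the "conflicts" field is illustrative:

-------------------------------------------------
// Sketch: an ObjectParser whose second type parameter is Void because no
// context object is needed; parse(...) is then invoked with a null context.
static final ObjectParser<ReindexRequest, Void> PARSER = new ObjectParser<>("reindex");
static {
    PARSER.declareString(ReindexRequest::setConflicts, new ParseField("conflicts")); // illustrative field
}
// ... later, while handling the request:
PARSER.parse(parser, request, null);
-------------------------------------------------
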
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java
index bf13d6d72e2..1945c6e2f76 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java
@@ -83,13 +83,24 @@ public abstract class ScrollableHitSource implements Closeable {
     protected abstract void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer<? super Response> onResponse);
 
     @Override
-    public void close() {
+    public final void close() {
         String scrollId = this.scrollId.get();
         if (Strings.hasLength(scrollId)) {
-            clearScroll(scrollId);
+            clearScroll(scrollId, this::cleanup);
+        } else {
+            cleanup();
         }
     }
-    protected abstract void clearScroll(String scrollId);
+
+    /**
+     * Called to clear a scroll id.
+     * @param scrollId the id to clear
+     * @param onCompletion implementers must call this after completing the clear, whether or not they were successful
+     */
+    protected abstract void clearScroll(String scrollId, Runnable onCompletion);
+
+    /**
+     * Called after the process has finished entirely, to clean up any resources the process needed, such as remote connections.
+     */
+    protected abstract void cleanup();
 
     /**
      * Set the id of the last scroll. Used for debugging.
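Note: splitting close() into clearScroll(scrollId, onCompletion) plus cleanup() makes the completion signal explicit, which the javadoc above requires of implementers. A hypothetical minimal implementation of the two hooks, for a source that holds no server-side or client-side state:

-------------------------------------------------
// Hypothetical subclass hooks: even with nothing to clear or release, the
// implementation must invoke onCompletion so close() can finish its chain.
@Override
protected void clearScroll(String scrollId, Runnable onCompletion) {
    onCompletion.run(); // no remote scroll state to clear in this toy source
}

@Override
protected void cleanup() {
    // no resources (connections, clients) held by this implementation
}
-------------------------------------------------
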
This first token after the object is [" + shouldBeEof + "]"); + } } + + if (searchRequest.source().fetchSource() != null) { + entity.field("_source", searchRequest.source().fetchSource()); + } + entity.endObject(); BytesRef bytes = entity.bytes().toBytesRef(); return new ByteArrayEntity(bytes.bytes, bytes.offset, bytes.length, ContentType.APPLICATION_JSON); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java index 3fc3dc555e6..03a90bba815 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -81,27 +81,12 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { this.client = client; } - @Override - public void close() { - super.close(); - /* This might be called on the RestClient's thread pool and attempting to close the client on its own threadpool causes it to fail - * to close. So we always shutdown the RestClient asynchronously on a thread in Elasticsearch's generic thread pool. That way we - * never close the client in its own thread pool. */ - threadPool.generic().submit(() -> { - try { - client.close(); - } catch (IOException e) { - logger.error("Failed to shutdown the remote connection", e); - } - }); - } - @Override protected void doStart(Consumer onResponse) { lookupRemoteVersion(version -> { remoteVersion = version; execute("POST", initialSearchPath(searchRequest), initialSearchParams(searchRequest, version), - initialSearchEntity(query), RESPONSE_PARSER, r -> onStartResponse(onResponse, r)); + initialSearchEntity(searchRequest, query), RESPONSE_PARSER, r -> onStartResponse(onResponse, r)); }); } @@ -125,17 +110,32 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { } @Override - protected void clearScroll(String scrollId) { - // Need to throw out response.... + protected void clearScroll(String scrollId, Runnable onCompletion) { client.performRequestAsync("DELETE", scrollPath(), emptyMap(), scrollEntity(scrollId), new ResponseListener() { @Override public void onSuccess(org.elasticsearch.client.Response response) { logger.debug("Successfully cleared [{}]", scrollId); + onCompletion.run(); } @Override public void onFailure(Exception t) { logger.warn((Supplier) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), t); + onCompletion.run(); + } + }); + } + + @Override + protected void cleanup() { + /* This is called on the RestClient's thread pool and attempting to close the client on its own threadpool causes it to fail to + * close. So we always shutdown the RestClient asynchronously on a thread in Elasticsearch's generic thread pool. 
*/ + threadPool.generic().submit(() -> { + try { + client.close(); + logger.info("Shut down remote connection"); + } catch (IOException e) { + logger.error("Failed to shutdown the remote connection", e); } }); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java index 1fa3b50b3a4..abef7fb5902 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -21,16 +21,13 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.reindex.RestReindexAction.ReindexParseContext; import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.rest.RestController; -import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; @@ -124,16 +121,14 @@ public class RestReindexActionTests extends ESTestCase { } try (XContentParser p = createParser(JsonXContent.jsonXContent, request)) { ReindexRequest r = new ReindexRequest(new SearchRequest(), new IndexRequest()); - SearchRequestParsers searchParsers = new SearchRequestParsers(); - RestReindexAction.PARSER.parse(p, r, new ReindexParseContext(searchParsers, ParseFieldMatcher.STRICT)); + RestReindexAction.PARSER.parse(p, r, null); assertEquals("localhost", r.getRemoteInfo().getHost()); assertArrayEquals(new String[] {"source"}, r.getSearchRequest().indices()); } } public void testPipelineQueryParameterIsError() throws IOException { - SearchRequestParsers parsers = new SearchRequestParsers(); - RestReindexAction action = new RestReindexAction(Settings.EMPTY, mock(RestController.class), parsers, null); + RestReindexAction action = new RestReindexAction(Settings.EMPTY, mock(RestController.class)); FakeRestRequest.Builder request = new FakeRestRequest.Builder(xContentRegistry()); try (XContentBuilder body = JsonXContent.contentBuilder().prettyPrint()) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index dec5263352a..3de7e09debe 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -152,20 +152,29 @@ public class RemoteRequestBuildersTests extends ESTestCase { assertThat(params, scroll == null ? not(hasKey("scroll")) : hasEntry("scroll", scroll.toString())); assertThat(params, hasEntry("size", Integer.toString(size))); assertThat(params, fetchVersion == null || fetchVersion == true ? 
hasEntry("version", null) : not(hasEntry("version", null))); - assertThat(params, hasEntry("_source", "true")); } public void testInitialSearchEntity() throws IOException { + SearchRequest searchRequest = new SearchRequest(); + searchRequest.source(new SearchSourceBuilder()); String query = "{\"match_all\":{}}"; - HttpEntity entity = initialSearchEntity(new BytesArray(query)); + HttpEntity entity = initialSearchEntity(searchRequest, new BytesArray(query)); assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); assertEquals("{\"query\":" + query + "}", Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); + // Source filtering is included if set up + searchRequest.source().fetchSource(new String[] {"in1", "in2"}, new String[] {"out"}); + entity = initialSearchEntity(searchRequest, new BytesArray(query)); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals("{\"query\":" + query + ",\"_source\":{\"includes\":[\"in1\",\"in2\"],\"excludes\":[\"out\"]}}", + Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); + // Invalid XContent fails - RuntimeException e = expectThrows(RuntimeException.class, () -> initialSearchEntity(new BytesArray("{}, \"trailing\": {}"))); + RuntimeException e = expectThrows(RuntimeException.class, + () -> initialSearchEntity(searchRequest, new BytesArray("{}, \"trailing\": {}"))); assertThat(e.getCause().getMessage(), containsString("Unexpected character (',' (code 44))")); - e = expectThrows(RuntimeException.class, () -> initialSearchEntity(new BytesArray("{"))); + e = expectThrows(RuntimeException.class, () -> initialSearchEntity(searchRequest, new BytesArray("{"))); assertThat(e.getCause().getMessage(), containsString("Unexpected end-of-input")); } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml index a567ca67bfa..3557cf9bad7 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml @@ -26,6 +26,13 @@ - is_false: task - is_false: deleted + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + --- "Response format for updated": - do: @@ -60,6 +67,13 @@ - is_false: task - is_false: deleted + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + --- "wait_for_completion=false": - do: @@ -110,6 +124,13 @@ - is_false: response.task - is_false: response.deleted + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + --- "Response format for version conflict": - do: @@ -151,6 +172,13 @@ - match: {failures.0.cause.index: dest} - gte: { took: 0 } + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + --- "Response format for version conflict with conflicts=proceed": - do: @@ -185,210 +213,12 @@ - match: {throttled_millis: 0} - gte: { took: 0 } ---- -"Simplest example in docs": + # Make sure reindex closed all the scroll contexts - do: 
- index: - index: twitter - type: tweet - id: 1 - body: { "user": "kimchy" } - - do: - indices.refresh: {} - - - do: - reindex: - refresh: true - body: - source: - index: twitter - dest: - index: new_twitter - - - do: - search: - index: new_twitter - - match: { hits.total: 1 } - ---- -"Limit by type example in docs": - - do: - index: - index: twitter - type: tweet - id: 1 - body: { "user": "kimchy" } - - do: - index: - index: twitter - type: junk - id: 1 - body: { "user": "kimchy" } - - do: - indices.refresh: {} - - - do: - reindex: - refresh: true - body: - source: - index: twitter - type: tweet - dest: - index: new_twitter - - - do: - search: - index: new_twitter - - match: { hits.total: 1 } - ---- -"Limit by query example in docs": - - do: - index: - index: twitter - type: tweet - id: 1 - body: { "user": "kimchy" } - - do: - index: - index: twitter - type: tweet - id: 2 - body: { "user": "junk" } - - do: - indices.refresh: {} - - - do: - reindex: - refresh: true - body: - source: - index: twitter - query: - match: - user: kimchy - dest: - index: new_twitter - - - do: - search: - index: new_twitter - - match: { hits.total: 1 } - ---- -"Override type example in docs": - - do: - index: - index: twitter - type: tweet - id: 1 - body: { "user": "kimchy" } - - do: - index: - index: twitter - type: junk - id: 1 - body: { "user": "kimchy" } - - do: - indices.refresh: {} - - - do: - reindex: - refresh: true - body: - source: - index: twitter - type: tweet - dest: - index: new_twitter - type: chirp - - - do: - search: - index: new_twitter - type: chirp - - match: { hits.total: 1 } - ---- -"Multi index, multi type example from docs": - - do: - index: - index: twitter - type: tweet - id: 1 - body: { "user": "kimchy" } - - do: - index: - index: blog - type: post - id: 1 - body: { "user": "kimchy" } - - do: - indices.refresh: {} - - - do: - reindex: - refresh: true - body: - source: - index: [twitter, blog] - type: [tweet, post] - dest: - index: all_together - - - do: - search: - index: all_together - type: tweet - body: - query: - match: - user: kimchy - - match: { hits.total: 1 } - - - do: - search: - index: all_together - type: post - body: - query: - match: - user: kimchy - - match: { hits.total: 1 } - ---- -"Limit by size example from docs": - - do: - index: - index: twitter - type: tweet - id: 1 - body: { "user": "kimchy" } - - do: - index: - index: twitter - type: tweet - id: 2 - body: { "user": "kimchy" } - - do: - indices.refresh: {} - - - do: - reindex: - refresh: true - body: - size: 1 - source: - index: twitter - dest: - index: new_twitter - - - do: - search: - index: new_twitter - type: tweet - - match: { hits.total: 1 } + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} --- "Source document without any fields works": @@ -416,3 +246,56 @@ type: foo id: 1 - match: { _source: {} } + + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + +--- +"Reindex with source filtering": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test", "filtered": "removed" } + refresh: true + + - do: + reindex: + refresh: true + body: + source: + index: source + _source: + excludes: + - filtered + dest: + index: dest + - match: {created: 1} + - match: {updated: 0} + - match: {version_conflicts: 0} + - match: {batches: 1} + - match: {failures: []} + - match: {throttled_millis: 0} + - gte: { took: 0 } + - is_false: task + 
- is_false: deleted + + - do: + get: + index: dest + type: foo + id: 1 + - match: { _source.text: "test" } + - is_false: _source.filtered + + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml index 50f12192960..c9f441c9cd3 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml @@ -295,3 +295,21 @@ index: test dest: index: dest + +--- +"_source:false is rejected": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + catch: /_source:false is not supported in this context/ + reindex: + body: + source: + index: source + _source: false + dest: + index: dest diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yaml index ab47a306f57..576447b4e54 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yaml @@ -46,6 +46,13 @@ text: test - match: {hits.total: 1} + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + --- "Reindex from remote with query": - do: @@ -95,6 +102,13 @@ match_all: {} - match: {hits.total: 1} + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + --- "Reindex from remote with routing": - do: @@ -137,6 +151,13 @@ text: test - match: {hits.total: 1} + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + --- "Reindex from remote with parent/child": - do: @@ -206,6 +227,13 @@ text: test - match: {hits.total: 1} + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + --- "Reindex from remote with timeouts": # Validates that you can configure the socket_timeout and connect_timeout, @@ -258,6 +286,76 @@ text: test - match: {hits.total: 1} + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + +--- +"Reindex from remote with size": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + refresh: true + - do: + index: + index: source + type: foo + id: 2 + body: { "text": "test" } + refresh: true + + # Fetch the http host. We use the host of the master because we know there will always be a master. 
+ - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + size: 1 + source: + remote: + host: http://${host} + index: source + dest: + index: dest + - match: {created: 1} + - match: {updated: 0} + - match: {version_conflicts: 0} + - match: {batches: 1} + - match: {failures: []} + - match: {throttled_millis: 0} + - gte: { took: 0 } + - is_false: task + - is_false: deleted + + - do: + search: + index: dest + body: + query: + match: + text: test + - match: {hits.total: 1} + + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + --- "Reindex from remote with broken query": - do: @@ -311,3 +409,53 @@ index: source dest: index: dest + +--- +"Reindex from remote with source filtering": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test", "filtered": "removed" } + refresh: true + + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + source: + remote: + host: http://${host} + index: source + _source: + excludes: + - filtered + dest: + index: dest + - match: {created: 1} + - match: {updated: 0} + - match: {version_conflicts: 0} + - match: {batches: 1} + - match: {failures: []} + - match: {throttled_millis: 0} + - gte: { took: 0 } + - is_false: task + - is_false: deleted + + - do: + get: + index: dest + type: foo + id: 1 + - match: { _source.text: "test" } + - is_false: _source.filtered diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java index 1250fb38a97..d809fd3fa88 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java @@ -35,7 +35,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.watcher.ResourceWatcherService; @@ -73,7 +72,6 @@ public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin ThreadPool threadPool, ResourceWatcherService resourceWatcherService, ScriptService scriptService, - SearchRequestParsers searchRequestParsers, NamedXContentRegistry xContentRegistry) { final int concurrentConnects = UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings); final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[file_based_discovery_resolve]"); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java index 
9d67eea628b..5e0de46f65a 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.node.Node; import java.util.ArrayList; import java.util.Collections; @@ -34,19 +35,14 @@ import java.util.Map; import java.util.function.Function; public final class AzureStorageSettings { - private static final Setting.AffixKey TIMEOUT_KEY = Setting.AffixKey.withAffix(Storage.PREFIX, "timeout"); - - private static final Setting TIMEOUT_SETTING = Setting.affixKeySetting( - TIMEOUT_KEY, - (s) -> Storage.TIMEOUT_SETTING.get(s).toString(), - (s) -> Setting.parseTimeValue(s, TimeValue.timeValueSeconds(-1), TIMEOUT_KEY.toString()), - Setting.Property.NodeScope); + private static final Setting TIMEOUT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "timeout", + (key) -> Setting.timeSetting(key, Storage.TIMEOUT_SETTING, Setting.Property.NodeScope)); private static final Setting ACCOUNT_SETTING = - Setting.affixKeySetting(Storage.PREFIX, "account", "", Function.identity(), Setting.Property.NodeScope); + Setting.affixKeySetting(Storage.PREFIX, "account", (key) -> Setting.simpleString(key, Setting.Property.NodeScope)); private static final Setting KEY_SETTING = - Setting.affixKeySetting(Storage.PREFIX, "key", "", Function.identity(), Setting.Property.NodeScope); + Setting.affixKeySetting(Storage.PREFIX, "key", (key) -> Setting.simpleString(key, Setting.Property.NodeScope)); private static final Setting DEFAULT_SETTING = - Setting.affixKeySetting(Storage.PREFIX, "default", "false", Boolean::valueOf, Setting.Property.NodeScope); + Setting.affixKeySetting(Storage.PREFIX, "default", (key) -> Setting.boolSetting(key, false, Setting.Property.NodeScope)); private final String name; diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml index c836ba73fa0..f9475057bc4 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml @@ -26,6 +26,13 @@ - '{"index": {"_index": "test_index", "_type": "test_type"}}' - '{"f1": "v5_mixed", "f2": 9}' + - do: + index: + index: test_index + type: test_type + id: d10 + body: {"f1": "v6_mixed", "f2": 10} + - do: indices.flush: index: test_index @@ -34,7 +41,23 @@ search: index: test_index - - match: { hits.total: 10 } # 5 docs from old cluster, 5 docs from mixed cluster + - match: { hits.total: 11 } # 5 docs from old cluster, 6 docs from mixed cluster + + - do: + delete: + index: test_index + type: test_type + id: d10 + + - do: + indices.flush: + index: test_index + + - do: + search: + index: test_index + + - match: { hits.total: 10 } --- "Verify custom cluster metadata still exists during upgrade": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count_percolate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count_percolate.json deleted file mode 100644 index 584f33685d3..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/count_percolate.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - 
"count_percolate": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-percolate.html", - "methods": ["GET", "POST"], - "url": { - "path": "/{index}/{type}/_percolate/count", - "paths": ["/{index}/{type}/_percolate/count", "/{index}/{type}/{id}/_percolate/count"], - "parts": { - "index": { - "type": "string", - "required": true, - "description": "The index of the document being count percolated." - }, - "type": { - "type": "string", - "required": true, - "description": "The type of the document being count percolated." - }, - "id": { - "type": "string", - "required": false, - "description": "Substitute the document in the request body with a document that is known by the specified id. On top of the id, the index and type parameter will be used to retrieve the document from within the cluster." - } - }, - "params": { - "routing": { - "type": "list", - "description": "A comma-separated list of specific routing values" - }, - "preference": { - "type": "string", - "description": "Specify the node or shard the operation should be performed on (default: random)" - }, - "ignore_unavailable": { - "type": "boolean", - "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices": { - "type": "boolean", - "description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards": { - "type": "enum", - "options": ["open", "closed","none","all"], - "default": "open", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "percolate_index": { - "type": "string", - "description": "The index to count percolate the document into. Defaults to index." - }, - "percolate_type": { - "type": "string", - "description": "The type to count percolate document into. Defaults to type." - }, - "version": { - "type": "number", - "description": "Explicit version number for concurrency control" - }, - "version_type": { - "type": "enum", - "options": ["internal", "external", "external_gte", "force"], - "description": "Specific version type" - } - } - }, - "body": { - "description": "The count percolator request definition using the percolate DSL", - "required": false - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json index de1e9246f7b..96c13591197 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json @@ -17,7 +17,7 @@ }, "metric" : { "type" : "list", - "options" : ["_all", "completion", "docs", "fielddata", "query_cache", "flush", "get", "indexing", "merge", "percolate", "request_cache", "refresh", "search", "segments", "store", "warmer", "suggest"], + "options" : ["_all", "completion", "docs", "fielddata", "query_cache", "flush", "get", "indexing", "merge", "request_cache", "refresh", "search", "segments", "store", "warmer", "suggest"], "description" : "Limit the information returned the specific metrics." 
} }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mpercolate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mpercolate.json deleted file mode 100644 index 7cbf4f61e43..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/mpercolate.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "mpercolate": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-percolate.html", - "methods": ["GET", "POST"], - "url": { - "path": "/_mpercolate", - "paths": ["/_mpercolate", "/{index}/_mpercolate", "/{index}/{type}/_mpercolate"], - "parts": { - "index": { - "type": "string", - "description": "The index of the document being count percolated to use as default" - }, - "type": { - "type" : "string", - "description" : "The type of the document being percolated to use as default." - } - }, - "params": { - "ignore_unavailable": { - "type": "boolean", - "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices": { - "type": "boolean", - "description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards": { - "type": "enum", - "options": ["open", "closed","none","all"], - "default": "open", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both." - } - } - }, - "body": { - "description": "The percolate request definitions (header & body pair), separated by newlines", - "required": true, - "serialize" : "bulk" - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json index a5910c9f328..a3d96121c3a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json @@ -20,7 +20,7 @@ }, "index_metric" : { "type" : "list", - "options" : ["_all", "completion", "docs", "fielddata", "query_cache", "flush", "get", "indexing", "merge", "percolate", "request_cache", "refresh", "search", "segments", "store", "warmer", "suggest"], + "options" : ["_all", "completion", "docs", "fielddata", "query_cache", "flush", "get", "indexing", "merge", "request_cache", "refresh", "search", "segments", "store", "warmer", "suggest"], "description" : "Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) metric isn't specified." }, "node_id": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/percolate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/percolate.json deleted file mode 100644 index e58655dea5a..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/percolate.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "percolate": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-percolate.html", - "methods": ["GET", "POST"], - "url": { - "path": "/{index}/{type}/_percolate", - "paths": ["/{index}/{type}/_percolate", "/{index}/{type}/{id}/_percolate"], - "parts": { - "index": { - "type" : "string", - "required" : true, - "description" : "The index of the document being percolated." - }, - "type": { - "type" : "string", - "required" : true, - "description" : "The type of the document being percolated." 
- }, - "id": { - "type" : "string", - "required" : false, - "description" : "Substitute the document in the request body with a document that is known by the specified id. On top of the id, the index and type parameter will be used to retrieve the document from within the cluster." - } - }, - "params": { - "routing": { - "type" : "list", - "description" : "A comma-separated list of specific routing values" - }, - "preference": { - "type" : "string", - "description" : "Specify the node or shard the operation should be performed on (default: random)" - }, - "ignore_unavailable": { - "type" : "boolean", - "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards": { - "type" : "enum", - "options" : ["open","closed","none","all"], - "default" : "open", - "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "percolate_index": { - "type" : "string", - "description" : "The index to percolate the document into. Defaults to index." - }, - "percolate_type": { - "type" : "string", - "description" : "The type to percolate document into. Defaults to type." - }, - "percolate_routing": { - "type" : "string", - "description" : "The routing value to use when percolating the existing document." - }, - "percolate_preference": { - "type" : "string", - "description" : "Which shard to prefer when executing the percolate request." - }, - "percolate_format": { - "type" : "enum", - "options" : ["ids"], - "description" : "Return an array of matching query IDs instead of objects" - }, - "version" : { - "type" : "number", - "description" : "Explicit version number for concurrency control" - }, - "version_type": { - "type" : "enum", - "options" : ["internal", "external", "external_gte", "force"], - "description" : "Specific version type" - } - } - }, - "body": { - "description" : "The percolator request definition using the percolate DSL", - "required" : false - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml index 117fbf74f56..52379390d47 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml @@ -18,12 +18,26 @@ setup: type: long double: type: double + number: + type: long scaled_float: type: scaled_float scaling_factor: 100 date: type: date + - do: + indices.create: + index: test_2 + body: + settings: + number_of_replicas: 0 + mappings: + test: + properties: + number: + type: double + - do: cluster.health: wait_for_status: green @@ -143,6 +157,7 @@ setup: - do: catch: request search: + index: test_1 body: { "size" : 0, "aggs" : { "ip_terms" : { "terms" : { "field" : "ip", "exclude" : "127.*" } } } } @@ -667,3 +682,68 @@ setup: - match: { aggregations.double_terms.buckets.0.key: 3.5 } - match: { aggregations.double_terms.buckets.0.doc_count: 1 } + +--- +"Mixing longs and doubles": + + - skip: + version: " - 5.99.99" + reason: in 6.0 longs and doubles are compatible within a terms agg (longs are promoted to doubles) + + - do: + index: + index: test_1 + type: test + id: 1 + body: 
{"number": 100} + + - do: + index: + index: test_1 + type: test + id: 2 + body: {"number": 10} + + - do: + index: + index: test_2 + type: test + id: 3 + body: {"number": 100.0} + + - do: + index: + index: test_2 + type: test + id: 1 + body: {"number": 10.0} + + - do: + index: + index: test_2 + type: test + id: 2 + body: {"number": 14.6} + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "number_terms" : { "terms" : { "field" : "number" } } } } + + - match: { hits.total: 5 } + + - length: { aggregations.number_terms.buckets: 3 } + + - match: { aggregations.number_terms.buckets.0.key: 10.0 } + + - match: { aggregations.number_terms.buckets.0.doc_count: 2 } + + - match: { aggregations.number_terms.buckets.1.key: 100.0 } + + - match: { aggregations.number_terms.buckets.1.doc_count: 2 } + + - match: { aggregations.number_terms.buckets.2.key: 14.6 } + + - match: { aggregations.number_terms.buckets.2.doc_count: 1 } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java index 933fd83ad5c..c4c1a45d6c3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java @@ -54,12 +54,12 @@ public class ClusterServiceUtils { clusterService.setLocalNode(localNode); clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { @Override - public void connectToNodes(List addedNodes) { + public void connectToNodes(Iterable discoveryNodes) { // skip } @Override - public void disconnectFromNodes(List removedNodes) { + public void disconnectFromNodesExcept(Iterable nodesToKeep) { // skip } }); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 38c4f6003be..6c575126276 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -132,6 +132,7 @@ import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; import static org.apache.lucene.util.LuceneTestCase.rarely; import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; import static org.elasticsearch.test.ESTestCase.assertBusy; +import static org.elasticsearch.test.ESTestCase.awaitBusy; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -1052,21 +1053,38 @@ public final class InternalTestCluster extends TestCluster { logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize); } - /** ensure a cluster is form with {@link #nodes}.size() nodes. */ + /** ensure a cluster is formed with all published nodes. 
*/ private void validateClusterFormed() { String name = randomFrom(random, getNodeNames()); validateClusterFormed(name); } - /** ensure a cluster is form with {@link #nodes}.size() nodes, but do so by using the client of the specified node */ + /** ensure a cluster is formed with all published nodes, but do so by using the client of the specified node */ private void validateClusterFormed(String viaNode) { - final int size = nodes.size(); - logger.trace("validating cluster formed via [{}], expecting [{}]", viaNode, size); + Set expectedNodes = new HashSet<>(); + for (NodeAndClient nodeAndClient : nodes.values()) { + expectedNodes.add(getInstanceFromNode(ClusterService.class, nodeAndClient.node()).localNode()); + } + logger.trace("validating cluster formed via [{}], expecting {}", viaNode, expectedNodes); final Client client = client(viaNode); - ClusterHealthResponse response = client.admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(size)).get(); - if (response.isTimedOut()) { - logger.warn("failed to wait for a cluster of size [{}], got [{}]", size, response); - throw new IllegalStateException("cluster failed to reach the expected size of [" + size + "]"); + try { + if (awaitBusy(() -> { + DiscoveryNodes discoveryNodes = client.admin().cluster().prepareState().get().getState().nodes(); + if (discoveryNodes.getSize() != expectedNodes.size()) { + return false; + } + for (DiscoveryNode expectedNode : expectedNodes) { + if (discoveryNodes.nodeExists(expectedNode) == false) { + return false; + } + } + return true; + }, 30, TimeUnit.SECONDS) == false) { + throw new IllegalStateException("cluster failed to form with expected nodes " + expectedNodes + " and actual nodes " + + client.admin().cluster().prepareState().get().getState().nodes()); + } + } catch (InterruptedException e) { + throw new IllegalStateException(e); } } @@ -1128,13 +1146,8 @@ public final class InternalTestCluster extends TestCluster { .map(task -> task.taskInfo(localNode.getId(), true)) .collect(Collectors.toList()); ListTasksResponse response = new ListTasksResponse(taskInfos, Collections.emptyList(), Collections.emptyList()); - XContentBuilder builder = null; try { - builder = XContentFactory.jsonBuilder() - .prettyPrint() - .startObject() - .value(response) - .endObject(); + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint().value(response); throw new AssertionError("expected index shard counter on shard " + indexShard.shardId() + " on node " + nodeAndClient.name + " to be 0 but was " + activeOperationsCount + ". 
Current replication tasks on node:\n" + builder.string()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java index f23e243074d..7611c17492e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java +++ b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java @@ -19,9 +19,11 @@ package org.elasticsearch.test; -import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.RoutingMissingException; +import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -29,15 +31,22 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.shard.IndexShardRecoveringException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.Base64; import java.util.List; import java.util.Random; +import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween; +import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiOfLength; import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomUnicodeOfLengthBetween; +import static org.elasticsearch.test.ESTestCase.randomFrom; public final class RandomObjects { @@ -56,10 +65,10 @@ public final class RandomObjects { * @param xContentType the content type, used to determine what the expected values are for float numbers. */ public static Tuple<List<Object>, List<Object>> randomStoredFieldValues(Random random, XContentType xContentType) { - int numValues = RandomNumbers.randomIntBetween(random, 1, 5); + int numValues = randomIntBetween(random, 1, 5); List originalValues = new ArrayList<>(); List expectedParsedValues = new ArrayList<>(); - int dataType = RandomNumbers.randomIntBetween(random, 0, 8); + int dataType = randomIntBetween(random, 0, 8); for (int i = 0; i < numValues; i++) { switch(dataType) { case 0: @@ -153,7 +162,7 @@ * Randomly adds fields, objects, or arrays to the provided builder. The maximum depth is 5. 
*/ private static void addFields(Random random, XContentBuilder builder, int currentDepth) throws IOException { - int numFields = RandomNumbers.randomIntBetween(random, 1, 5); + int numFields = randomIntBetween(random, 1, 5); for (int i = 0; i < numFields; i++) { if (currentDepth < 5 && random.nextBoolean()) { if (random.nextBoolean()) { @@ -162,7 +171,7 @@ builder.endObject(); } else { builder.startArray(RandomStrings.randomAsciiOfLengthBetween(random, 3, 10)); - int numElements = RandomNumbers.randomIntBetween(random, 1, 5); + int numElements = randomIntBetween(random, 1, 5); boolean object = random.nextBoolean(); int dataType = -1; if (object == false) { @@ -187,7 +196,7 @@ } private static int randomDataType(Random random) { - return RandomNumbers.randomIntBetween(random, 0, 3); + return randomIntBetween(random, 0, 3); } private static Object randomFieldValue(Random random, int dataType) { @@ -204,4 +213,41 @@ throw new UnsupportedOperationException(); } } + + /** + * Returns a random {@link ShardInfo} object with one or more {@link ShardInfo.Failure} if requested. + * + * @param random Random generator + * @param failures If true, the {@link ShardInfo} will have random failures + * @return a random {@link ShardInfo} + */ + public static ShardInfo randomShardInfo(Random random, boolean failures) { + int total = randomIntBetween(random, 1, 10); + if (failures == false) { + return new ShardInfo(total, total); + } + + int successful = randomIntBetween(random, 1, total); + return new ShardInfo(total, successful, randomShardInfoFailures(random, Math.max(1, (total - successful)))); + } + + public static ShardInfo.Failure[] randomShardInfoFailures(Random random, int nbFailures) { + List randomFailures = new ArrayList<>(nbFailures); + for (int i = 0; i < nbFailures; i++) { + randomFailures.add(randomShardInfoFailure(random)); + } + return randomFailures.toArray(new ShardInfo.Failure[nbFailures]); + } + + public static ShardInfo.Failure randomShardInfoFailure(Random random) { + String index = randomAsciiOfLength(random, 5); + String indexUuid = randomAsciiOfLength(random, 5); + int shardId = randomIntBetween(random, 1, 10); + ShardId shard = new ShardId(index, indexUuid, shardId); + RestStatus restStatus = randomFrom(RestStatus.values()); + Exception exception = RandomPicks.randomFrom(random, Arrays.asList(new IndexShardRecoveringException(shard), + new ElasticsearchException(new IllegalArgumentException("Argument is wrong")), + new RoutingMissingException(index, randomAsciiOfLength(random, 5), randomAsciiOfLength(random, 5)))); + return new ShardInfo.Failure(shard, randomAsciiOfLength(random, 3), exception, restStatus, random.nextBoolean()); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index b0cc848da61..bc8bd946240 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -64,7 +64,6 @@ import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.LinkedBlockingDeque; @@ -743,6 +742,8 @@ public final class 
MockTransportService extends TransportService { @Override protected void doClose() { super.doClose(); - assert openConnections.size() == 0 : "still open connections: " + openConnections; + synchronized (openConnections) { + assert openConnections.size() == 0 : "still open connections: " + openConnections; + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index 3b5b430f606..a08660bb388 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -49,6 +49,8 @@ import java.net.ServerSocket; import java.net.Socket; import java.net.SocketException; import java.net.SocketTimeoutException; +import java.util.HashMap; +import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -74,6 +76,8 @@ public class MockTcpTransport extends TcpTransport */ public static final ConnectionProfile LIGHT_PROFILE; + private final Map openChannels = new IdentityHashMap<>(); + static { ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); builder.addConnections(1, @@ -284,6 +288,9 @@ this.serverSocket = null; this.profile = profile; this.onClose = () -> onClose.accept(this); + synchronized (openChannels) { + openChannels.put(this, Boolean.TRUE); + } } /** @@ -353,12 +360,17 @@ @Override public void close() throws IOException { if (isOpen.compareAndSet(true, false)) { + final Boolean removedChannel; + synchronized (openChannels) { + removedChannel = openChannels.remove(this); + } //establish a happens-before edge between closing and accepting a new connection synchronized (this) { onChannelClosed(this); IOUtils.close(serverSocket, activeChannel, () -> IOUtils.close(workerChannels.keySet()), () -> cancellableThreads.cancel("channel closed"), onClose); } + assert removedChannel : "Channel was not removed or removed twice?"; } } } @@ -395,5 +407,16 @@ return mockVersion; } + @Override + protected void doClose() { + if (Thread.currentThread().isInterrupted() == false) { + // TcpTransport might be interrupted due to a timeout waiting for connections to be closed. + // In this case the thread is interrupted and we can't tell if we really missed something or if we are + // still closing connections. In such a case we don't assert the open channels. + synchronized (openChannels) { + assert openChannels.isEmpty() : "there are still open channels: " + openChannels; + } + } + } }
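For readers following the ScrollableHitSource change above: the synchronous clearScroll(String) hook is replaced by an asynchronous clearScroll(String, Runnable) plus a cleanup() hook, so resources such as remote connections are released only after the scroll has actually been cleared. Below is a minimal, self-contained sketch of that contract; the Sketch* names are hypothetical stand-ins, not the actual Elasticsearch classes, which additionally track the scroll id internally and log the outcome.

-------------------------------------------------
import java.util.concurrent.atomic.AtomicReference;

// Minimal sketch of the clearScroll/cleanup contract: cleanup() must run exactly
// once after the clear completes, whether the clear succeeded or failed.
abstract class SketchHitSource implements AutoCloseable {
    protected final AtomicReference<String> scrollId = new AtomicReference<>();

    @Override
    public final void close() {
        String scrollId = this.scrollId.get();
        if (scrollId != null && scrollId.isEmpty() == false) {
            // Defer resource cleanup until the (possibly asynchronous) clear finishes.
            clearScroll(scrollId, this::cleanup);
        } else {
            // Nothing to clear, so release resources immediately.
            cleanup();
        }
    }

    /** Implementers must invoke onCompletion after the clear, successful or not. */
    protected abstract void clearScroll(String scrollId, Runnable onCompletion);

    /** Called once the whole process is finished, e.g. to close remote connections. */
    protected abstract void cleanup();
}

// A hypothetical remote implementation: even if the clear fails, onCompletion runs.
class SketchRemoteHitSource extends SketchHitSource {
    @Override
    protected void clearScroll(String scrollId, Runnable onCompletion) {
        try {
            System.out.println("DELETE _search/scroll [" + scrollId + "]");
        } finally {
            onCompletion.run(); // always signal completion so cleanup() is never skipped
        }
    }

    @Override
    protected void cleanup() {
        System.out.println("closing remote connection");
    }

    public static void main(String[] args) {
        SketchRemoteHitSource source = new SketchRemoteHitSource();
        source.scrollId.set("scroll-id-1"); // pretend a scroll was started
        source.close(); // prints the DELETE line, then "closing remote connection"
    }
}
-------------------------------------------------

This ordering is also why RemoteScrollableHitSource above closes its RestClient from cleanup() on the generic thread pool rather than from close(): per the comment in the diff, the client cannot be closed from its own thread pool, and cleanup() guarantees the scroll has been cleared before the connection goes away.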