diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java index b9f7e256143..06082ed7d29 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.plugin.noop.action.bulk; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; @@ -84,7 +85,7 @@ public class RestNoopBulkAction extends BaseRestHandler { } private static class BulkRestBuilderListener extends RestBuilderListener { - private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, "update", + private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); private final RestRequest request; diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java index dcc225c2603..2a5efee1881 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.plugin.noop.action.bulk; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -34,7 +35,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class TransportNoopBulkAction extends HandledTransportAction { - private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, "update", + private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); @Inject diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 65cb867bec9..0d8c3b72672 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -89,12 +89,19 @@ public class Version { public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_5_0_0_rc1_ID = 5000051; public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_0_rc2_ID = 5000052; - public static final Version V_5_0_0_rc2 = new Version(V_5_0_0_rc2_ID, 
org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final Version CURRENT = V_6_0_0_alpha1; + /* NOTE: don't add unreleased version to this list except of the version assigned to CURRENT. + * If you need a version that doesn't exist here for instance V_5_1_0 then go and create such a version + * as a constant where you need it: + *
+     * <pre>
+     *   public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);
+     * </pre>
+ * Then go to VersionsTest.java and add a test for this constant VersionTests#testUnknownVersions(). + * This is particularly useful if you are building a feature that needs a BWC layer for this unreleased version etc.*/ + static { assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to [" + org.apache.lucene.util.Version.LATEST + "] is still set to [" + CURRENT.luceneVersion + "]"; @@ -108,8 +115,6 @@ public class Version { switch (id) { case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; - case V_5_0_0_rc2_ID: - return V_5_0_0_rc2; case V_5_0_0_rc1_ID: return V_5_0_0_rc1; case V_5_0_0_beta1_ID: diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/core/src/main/java/org/elasticsearch/action/DocWriteRequest.java new file mode 100644 index 00000000000..09db7089ff6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action; + +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.VersionType; + +import java.io.IOException; +import java.util.Locale; + +/** + * Generic interface to group ActionRequest, which perform writes to a single document + * Action requests implementing this can be part of {@link org.elasticsearch.action.bulk.BulkRequest} + */ +public interface DocWriteRequest extends IndicesRequest { + + /** + * Get the index that this request operates on + * @return the index + */ + String index(); + + /** + * Get the type that this request operates on + * @return the type + */ + String type(); + + /** + * Get the id of the document for this request + * @return the id + */ + String id(); + + /** + * Get the options for this request + * @return the indices options + */ + IndicesOptions indicesOptions(); + + /** + * Set the routing for this request + * @return the Request + */ + T routing(String routing); + + /** + * Get the routing for this request + * @return the Routing + */ + String routing(); + + + /** + * Get the parent for this request + * @return the Parent + */ + String parent(); + + /** + * Get the document version for this request + * @return the document version + */ + long version(); + + /** + * Sets the version, which will perform the operation only if a matching + * version exists and no changes happened on the doc since then. 
+ */ + T version(long version); + + /** + * Get the document version type for this request + * @return the document version type + */ + VersionType versionType(); + + /** + * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}. + */ + T versionType(VersionType versionType); + + /** + * Get the requested document operation type of the request + * @return the operation type {@link OpType} + */ + OpType opType(); + + /** + * Requested operation type to perform on the document + */ + enum OpType { + /** + * Index the source. If there an existing document with the id, it will + * be replaced. + */ + INDEX(0), + /** + * Creates the resource. Simply adds it to the index, if there is an existing + * document with the id, then it won't be removed. + */ + CREATE(1), + /** Updates a document */ + UPDATE(2), + /** Deletes a document */ + DELETE(3); + + private final byte op; + private final String lowercase; + + OpType(int op) { + this.op = (byte) op; + this.lowercase = this.toString().toLowerCase(Locale.ROOT); + } + + public byte getId() { + return op; + } + + public String getLowercase() { + return lowercase; + } + + public static OpType fromId(byte id) { + switch (id) { + case 0: return INDEX; + case 1: return CREATE; + case 2: return UPDATE; + case 3: return DELETE; + default: throw new IllegalArgumentException("Unknown opType: [" + id + "]"); + } + } + + public static OpType fromString(String sOpType) { + String lowerCase = sOpType.toLowerCase(Locale.ROOT); + for (OpType opType : OpType.values()) { + if (opType.getLowercase().equals(lowerCase)) { + return opType; + } + } + throw new IllegalArgumentException("Unknown opType: [" + sOpType + "]"); + } + } + + /** read a document write (index/delete/update) request */ + static DocWriteRequest readDocumentRequest(StreamInput in) throws IOException { + byte type = in.readByte(); + DocWriteRequest docWriteRequest; + if (type == 0) { + IndexRequest indexRequest = new IndexRequest(); + indexRequest.readFrom(in); + docWriteRequest = indexRequest; + } else if (type == 1) { + DeleteRequest deleteRequest = new DeleteRequest(); + deleteRequest.readFrom(in); + docWriteRequest = deleteRequest; + } else if (type == 2) { + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.readFrom(in); + docWriteRequest = updateRequest; + } else { + throw new IllegalStateException("invalid request type [" + type+ " ]"); + } + return docWriteRequest; + } + + /** write a document write (index/delete/update) request*/ + static void writeDocumentRequest(StreamOutput out, DocWriteRequest request) throws IOException { + if (request instanceof IndexRequest) { + out.writeByte((byte) 0); + ((IndexRequest) request).writeTo(out); + } else if (request instanceof DeleteRequest) { + out.writeByte((byte) 1); + ((DeleteRequest) request).writeTo(out); + } else if (request instanceof UpdateRequest) { + out.writeByte((byte) 2); + ((UpdateRequest) request).writeTo(out); + } else { + throw new IllegalStateException("invalid request [" + request.getClass().getSimpleName() + " ]"); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java deleted file mode 100644 index a90f013a6b9..00000000000 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action; - -import org.elasticsearch.action.support.IndicesOptions; - -/** - * Generic interface to group ActionRequest, which work on single document level - * - * Forces this class return index/type/id getters - */ -public interface DocumentRequest extends IndicesRequest { - - /** - * Get the index that this request operates on - * @return the index - */ - String index(); - - /** - * Get the type that this request operates on - * @return the type - */ - String type(); - - /** - * Get the id of the document for this request - * @return the id - */ - String id(); - - /** - * Get the options for this request - * @return the indices options - */ - IndicesOptions indicesOptions(); - - /** - * Set the routing for this request - * @return the Request - */ - T routing(String routing); - - /** - * Get the routing for this request - * @return the Routing - */ - String routing(); - - - /** - * Get the parent for this request - * @return the Parent - */ - String parent(); - -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java index 831ef6e1060..2ccf2f1bd3e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java @@ -20,14 +20,15 @@ package org.elasticsearch.action.admin.indices.validate.query; import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.internal.AliasFilter; import java.io.IOException; +import java.util.Objects; /** * Internal validate request executed directly against a specific index shard. 
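The move in this file from a nullable String[] to a required AliasFilter replaces roughly a dozen lines of hand-rolled null-and-length handling with one writeTo/read-constructor pair. A minimal, self-contained sketch of that pattern using plain Java streams — WireAliasFilter is a hypothetical stand-in for illustration, not the real org.elasticsearch.search.internal.AliasFilter:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical stand-in for AliasFilter: the type owns its wire format, so each
// request serializes the field with one call instead of a null check plus a
// length-prefixed loop on both the read and write sides.
final class WireAliasFilter {
    final String[] aliases;

    WireAliasFilter(String... aliases) {
        this.aliases = aliases; // never null: an empty array means "no aliases"
    }

    // Read-side constructor, mirroring AliasFilter(StreamInput) in this patch.
    WireAliasFilter(DataInputStream in) throws IOException {
        aliases = new String[in.readInt()];
        for (int i = 0; i < aliases.length; i++) {
            aliases[i] = in.readUTF();
        }
    }

    // Symmetric write side, mirroring filteringAliases.writeTo(out).
    void writeTo(DataOutputStream out) throws IOException {
        out.writeInt(aliases.length);
        for (String alias : aliases) {
            out.writeUTF(alias);
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new WireAliasFilter("alias_a", "alias_b").writeTo(new DataOutputStream(bytes));
        WireAliasFilter copy = new WireAliasFilter(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.aliases.length); // prints 2
    }
}

Requiring the field at construction time, as the patch does with Objects.requireNonNull, is what lets both stream methods drop their null branches.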
@@ -39,21 +40,18 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { private boolean explain; private boolean rewrite; private long nowInMillis; - - @Nullable - private String[] filteringAliases; + private AliasFilter filteringAliases; public ShardValidateQueryRequest() { - } - ShardValidateQueryRequest(ShardId shardId, @Nullable String[] filteringAliases, ValidateQueryRequest request) { + public ShardValidateQueryRequest(ShardId shardId, AliasFilter filteringAliases, ValidateQueryRequest request) { super(shardId, request); this.query = request.query(); this.types = request.types(); this.explain = request.explain(); this.rewrite = request.rewrite(); - this.filteringAliases = filteringAliases; + this.filteringAliases = Objects.requireNonNull(filteringAliases, "filteringAliases must not be null"); this.nowInMillis = request.nowInMillis; } @@ -69,11 +67,11 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { return this.explain; } - public boolean rewrite() { - return this.rewrite; + public boolean rewrite() { + return this.rewrite; } - public String[] filteringAliases() { + public AliasFilter filteringAliases() { return filteringAliases; } @@ -93,14 +91,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { types[i] = in.readString(); } } - int aliasesSize = in.readVInt(); - if (aliasesSize > 0) { - filteringAliases = new String[aliasesSize]; - for (int i = 0; i < aliasesSize; i++) { - filteringAliases[i] = in.readString(); - } - } - + filteringAliases = new AliasFilter(in); explain = in.readBoolean(); rewrite = in.readBoolean(); nowInMillis = in.readVLong(); @@ -110,20 +101,11 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeNamedWriteable(query); - out.writeVInt(types.length); for (String type : types) { out.writeString(type); } - if (filteringAliases != null) { - out.writeVInt(filteringAliases.length); - for (String alias : filteringAliases) { - out.writeString(alias); - } - } else { - out.writeVInt(0); - } - + filteringAliases.writeTo(out); out.writeBoolean(explain); out.writeBoolean(rewrite); out.writeVLong(nowInMillis); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 25ced69f03a..b80b721149c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.action.ActionListener; @@ -43,6 +42,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.tasks.Task; @@ -77,8 +77,9 @@ public class TransportValidateQueryAction extends TransportBroadcastAction) request); + return 
add((DocWriteRequest) request); } /** * Adds an {@link DeleteRequest} to the list of actions to execute. */ public BulkProcessor add(DeleteRequest request) { - return add((ActionRequest) request); + return add((DocWriteRequest) request); } /** * Adds either a delete or an index request. */ - public BulkProcessor add(ActionRequest request) { + public BulkProcessor add(DocWriteRequest request) { return add(request, null); } - public BulkProcessor add(ActionRequest request, @Nullable Object payload) { + public BulkProcessor add(DocWriteRequest request, @Nullable Object payload) { internalAdd(request, payload); return this; } @@ -282,7 +282,7 @@ public class BulkProcessor implements Closeable { } } - private synchronized void internalAdd(ActionRequest request, @Nullable Object payload) { + private synchronized void internalAdd(DocWriteRequest request, @Nullable Object payload) { ensureOpen(); bulkRequest.add(request, payload); executeIfNeeded(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index b9c615c260c..39102913262 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -49,6 +50,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -70,7 +72,7 @@ public class BulkRequest extends ActionRequest implements Composite * {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare * the one with the least casts. */ - final List> requests = new ArrayList<>(); + final List requests = new ArrayList<>(); List payloads = null; protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; @@ -85,14 +87,14 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(ActionRequest... requests) { - for (ActionRequest request : requests) { + public BulkRequest add(DocWriteRequest... requests) { + for (DocWriteRequest request : requests) { add(request, null); } return this; } - public BulkRequest add(ActionRequest request) { + public BulkRequest add(DocWriteRequest request) { return add(request, null); } @@ -102,7 +104,7 @@ public class BulkRequest extends ActionRequest implements Composite * @param payload Optional payload * @return the current bulk request */ - public BulkRequest add(ActionRequest request, @Nullable Object payload) { + public BulkRequest add(DocWriteRequest request, @Nullable Object payload) { if (request instanceof IndexRequest) { add((IndexRequest) request, payload); } else if (request instanceof DeleteRequest) { @@ -118,8 +120,8 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. 
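With index, delete, and update requests all implementing DocWriteRequest, the bulk entry points collapse to a single signature. A hedged usage sketch follows — it assumes the 5.x-era three-argument request constructors and the pre-XContentType source(String)/doc(String) overloads, and the index name and payloads are made up:

import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;

public class MixedBulkSketch {
    public static void main(String[] args) {
        // One add(...) path now covers all three write operations.
        BulkRequest bulk = new BulkRequest().add(
                new IndexRequest("logs", "event", "1").source("{\"msg\":\"hello\"}"),
                new UpdateRequest("logs", "event", "1").doc("{\"msg\":\"hello again\"}"),
                new DeleteRequest("logs", "event", "2"));

        // opType() returns the shared enum instead of the old string constants.
        for (DocWriteRequest request : bulk.requests()) {
            System.out.println(request.opType().getLowercase() + " " + request.id());
        }
    }
}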
*/ - public BulkRequest add(Iterable> requests) { - for (ActionRequest request : requests) { + public BulkRequest add(Iterable requests) { + for (DocWriteRequest request : requests) { add(request); } return this; @@ -205,18 +207,13 @@ public class BulkRequest extends ActionRequest implements Composite /** * The list of requests in this bulk request. */ - public List> requests() { + public List requests() { return this.requests; } @Override public List subRequests() { - List indicesRequests = new ArrayList<>(); - for (ActionRequest request : requests) { - assert request instanceof IndicesRequest; - indicesRequests.add((IndicesRequest) request); - } - return indicesRequests; + return requests.stream().collect(Collectors.toList()); } /** @@ -511,7 +508,7 @@ public class BulkRequest extends ActionRequest implements Composite * @return Whether this bulk request contains index request with an ingest pipeline enabled. */ public boolean hasIndexRequestsWithPipelines() { - for (ActionRequest actionRequest : requests) { + for (DocWriteRequest actionRequest : requests) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { @@ -529,13 +526,13 @@ public class BulkRequest extends ActionRequest implements Composite if (requests.isEmpty()) { validationException = addValidationError("no requests added", validationException); } - for (ActionRequest request : requests) { + for (DocWriteRequest request : requests) { // We first check if refresh has been set if (((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { validationException = addValidationError( "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", validationException); } - ActionRequestValidationException ex = request.validate(); + ActionRequestValidationException ex = ((WriteRequest) request).validate(); if (ex != null) { if (validationException == null) { validationException = new ActionRequestValidationException(); @@ -553,20 +550,7 @@ public class BulkRequest extends ActionRequest implements Composite waitForActiveShards = ActiveShardCount.readFrom(in); int size = in.readVInt(); for (int i = 0; i < size; i++) { - byte type = in.readByte(); - if (type == 0) { - IndexRequest request = new IndexRequest(); - request.readFrom(in); - requests.add(request); - } else if (type == 1) { - DeleteRequest request = new DeleteRequest(); - request.readFrom(in); - requests.add(request); - } else if (type == 2) { - UpdateRequest request = new UpdateRequest(); - request.readFrom(in); - requests.add(request); - } + requests.add(DocWriteRequest.readDocumentRequest(in)); } refreshPolicy = RefreshPolicy.readFrom(in); timeout = new TimeValue(in); @@ -577,15 +561,8 @@ public class BulkRequest extends ActionRequest implements Composite super.writeTo(out); waitForActiveShards.writeTo(out); out.writeVInt(requests.size()); - for (ActionRequest request : requests) { - if (request instanceof IndexRequest) { - out.writeByte((byte) 0); - } else if (request instanceof DeleteRequest) { - out.writeByte((byte) 1); - } else if (request instanceof UpdateRequest) { - out.writeByte((byte) 2); - } - request.writeTo(out); + for (DocWriteRequest request : requests) { + DocWriteRequest.writeDocumentRequest(out, request); } refreshPolicy.writeTo(out); timeout.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 
1943b793692..854b2fcf892 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -19,12 +19,10 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -58,16 +56,19 @@ import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.LongSupplier; +import java.util.stream.Collectors; +/** + * Groups bulk request items by shard, optionally creating non-existent indices and + * delegates to {@link TransportShardBulkAction} for shard-level bulk execution + */ public class TransportBulkAction extends HandledTransportAction { private final AutoCreateIndex autoCreateIndex; @@ -116,15 +117,9 @@ public class TransportBulkAction extends HandledTransportAction autoCreateIndices = new HashSet<>(); - for (ActionRequest request : bulkRequest.requests) { - if (request instanceof DocumentRequest) { - DocumentRequest req = (DocumentRequest) request; - autoCreateIndices.add(req.index()); - } else { - throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName()); - } - } + final Set autoCreateIndices = bulkRequest.requests.stream() + .map(DocWriteRequest::index) + .collect(Collectors.toSet()); final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size()); ClusterState state = clusterService.state(); for (String index : autoCreateIndices) { @@ -150,7 +145,7 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, ActionRequest request, String index, Exception e) { - if (request instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) request; - if (index.equals(indexRequest.index())) { - responses.set(idx, new BulkItemResponse(idx, "index", new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e))); - return true; - } - } else if (request instanceof DeleteRequest) { - DeleteRequest deleteRequest = (DeleteRequest) request; - if (index.equals(deleteRequest.index())) { - responses.set(idx, new BulkItemResponse(idx, "delete", new BulkItemResponse.Failure(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), e))); - return true; - } - } else if (request instanceof UpdateRequest) { - UpdateRequest updateRequest = (UpdateRequest) request; - if (index.equals(updateRequest.index())) { - responses.set(idx, new BulkItemResponse(idx, "update", new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(), updateRequest.id(), e))); - return true; - } - } else { - throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName()); + private boolean 
setResponseFailureIfIndexMatches(AtomicArray responses, int idx, DocWriteRequest request, String index, Exception e) { + if (index.equals(request.index())) { + responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e))); + return true; } return false; } @@ -233,95 +211,56 @@ public class TransportBulkAction extends HandledTransportAction Operations mapping Map> requestsByShard = new HashMap<>(); - for (int i = 0; i < bulkRequest.requests.size(); i++) { - ActionRequest request = bulkRequest.requests.get(i); - if (request instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) request; - String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, indexRequest.id(), indexRequest.routing()).shardId(); - List list = requestsByShard.get(shardId); - if (list == null) { - list = new ArrayList<>(); - requestsByShard.put(shardId, list); - } - list.add(new BulkItemRequest(i, request)); - } else if (request instanceof DeleteRequest) { - DeleteRequest deleteRequest = (DeleteRequest) request; - String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.id(), deleteRequest.routing()).shardId(); - List list = requestsByShard.get(shardId); - if (list == null) { - list = new ArrayList<>(); - requestsByShard.put(shardId, list); - } - list.add(new BulkItemRequest(i, request)); - } else if (request instanceof UpdateRequest) { - UpdateRequest updateRequest = (UpdateRequest) request; - String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.id(), updateRequest.routing()).shardId(); - List list = requestsByShard.get(shardId); - if (list == null) { - list = new ArrayList<>(); - requestsByShard.put(shardId, list); - } - list.add(new BulkItemRequest(i, request)); + DocWriteRequest request = bulkRequest.requests.get(i); + if (request == null) { + continue; } + String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName(); + ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, request.id(), request.routing()).shardId(); + List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); + shardRequests.add(new BulkItemRequest(i, request)); } if (requestsByShard.isEmpty()) { @@ -361,19 +300,9 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, - final ConcreteIndices concreteIndices, - final MetaData metaData) { + private boolean addFailureIfIndexIsUnavailable(DocWriteRequest request, BulkRequest bulkRequest, AtomicArray responses, int idx, + final ConcreteIndices concreteIndices, + final MetaData metaData) { Index concreteIndex = concreteIndices.getConcreteIndex(request.index()); Exception unavailableException = null; if (concreteIndex == null) { @@ -410,15 +339,7 @@ public class TransportBulkAction extends HandledTransportAction { - private static final String OP_TYPE_UPDATE = "update"; - private static final String OP_TYPE_DELETE = "delete"; - public static final String ACTION_NAME = BulkAction.NAME + "[s]"; private final UpdateHelper updateHelper; @@ -116,8 +109,7 @@ public class 
TransportShardBulkAction extends TransportWriteAction(response, location); } - private Translog.Location handleItem(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - if (item.request() instanceof IndexRequest) { - location = index(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); - } else if (item.request() instanceof DeleteRequest) { - location = delete(request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); - } else if (item.request() instanceof UpdateRequest) { - Tuple tuple = update(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); - location = tuple.v1(); - item = tuple.v2(); - } else { - throw new IllegalStateException("Unexpected index operation: " + item.request()); + /** Executes bulk item requests and handles request execution exceptions */ + private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard indexShard, + BulkShardRequest request, + long[] preVersions, VersionType[] preVersionTypes, + Translog.Location location, int requestIndex) { + preVersions[requestIndex] = request.items()[requestIndex].request().version(); + preVersionTypes[requestIndex] = request.items()[requestIndex].request().versionType(); + DocWriteRequest.OpType opType = request.items()[requestIndex].request().opType(); + try { + WriteResult writeResult = innerExecuteBulkItemRequest(metaData, indexShard, + request, requestIndex); + if (writeResult.getLocation() != null) { + location = locationToSync(location, writeResult.getLocation()); + } else { + assert writeResult.getResponse().getResult() == DocWriteResponse.Result.NOOP + : "only noop operation can have null next operation"; + } + // update the bulk item request because update request execution can mutate the bulk item request + BulkItemRequest item = request.items()[requestIndex]; + // add the response + setResponse(item, new BulkItemResponse(item.id(), opType, writeResult.getResponse())); + } catch (Exception e) { + // rethrow the failure if we are going to retry on primary and let parent failure to handle it + if (retryPrimaryException(e)) { + // restore updated versions... 
+ for (int j = 0; j < requestIndex; j++) { + DocWriteRequest docWriteRequest = request.items()[j].request(); + docWriteRequest.version(preVersions[j]); + docWriteRequest.versionType(preVersionTypes[j]); + } + throw (ElasticsearchException) e; + } + BulkItemRequest item = request.items()[requestIndex]; + DocWriteRequest docWriteRequest = item.request(); + if (isConflictException(e)) { + logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + request.shardId(), docWriteRequest.opType().getLowercase(), request), e); + } else { + logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + request.shardId(), docWriteRequest.opType().getLowercase(), request), e); + } + // if its a conflict failure, and we already executed the request on a primary (and we execute it + // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) + // then just use the response we got from the successful execution + if (item.getPrimaryResponse() != null && isConflictException(e)) { + setResponse(item, item.getPrimaryResponse()); + } else { + setResponse(item, new BulkItemResponse(item.id(), docWriteRequest.opType(), + new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), e))); + } } - - assert item.getPrimaryResponse() != null; + assert request.items()[requestIndex].getPrimaryResponse() != null; assert preVersionTypes[requestIndex] != null; return location; } - private Translog.Location index(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - IndexRequest indexRequest = (IndexRequest) item.request(); - preVersions[requestIndex] = indexRequest.version(); - preVersionTypes[requestIndex] = indexRequest.versionType(); - try { - WriteResult result = shardIndexOperation(request, indexRequest, metaData, indexShard, true); - location = locationToSync(location, result.getLocation()); - // add the response - IndexResponse indexResponse = result.getResponse(); - setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse)); - } catch (Exception e) { - // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(e)) { - // restore updated versions... 
- for (int j = 0; j < requestIndex; j++) { - applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]); - } - throw (ElasticsearchException) e; - } - logFailure(e, "index", request.shardId(), indexRequest); - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (item.getPrimaryResponse() != null && isConflictException(e)) { - setResponse(item, item.getPrimaryResponse()); - } else { - setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), - new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e))); - } - } - return location; - } - - private > void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest request) { - if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t); - } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t); - } - } - - private Translog.Location delete(BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - DeleteRequest deleteRequest = (DeleteRequest) item.request(); - preVersions[requestIndex] = deleteRequest.version(); - preVersionTypes[requestIndex] = deleteRequest.versionType(); - - try { - // add the response - final WriteResult writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - DeleteResponse deleteResponse = writeResult.getResponse(); - location = locationToSync(location, writeResult.getLocation()); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse)); - } catch (Exception e) { - // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(e)) { - // restore updated versions... - for (int j = 0; j < requestIndex; j++) { - applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]); - } - throw (ElasticsearchException) e; - } - logFailure(e, "delete", request.shardId(), deleteRequest); - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (item.getPrimaryResponse() != null && isConflictException(e)) { - setResponse(item, item.getPrimaryResponse()); - } else { - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, - new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e))); - } - } - return location; - } - - private Tuple update(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - UpdateRequest updateRequest = (UpdateRequest) item.request(); - preVersions[requestIndex] = updateRequest.version(); - preVersionTypes[requestIndex] = updateRequest.versionType(); - // We need to do the requested retries plus the initial attempt. 
We don't do < 1+retry_on_conflict because retry_on_conflict may be Integer.MAX_VALUE - for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) { - UpdateResult updateResult; - try { - updateResult = shardUpdateOperation(metaData, request, updateRequest, indexShard); - } catch (Exception t) { - updateResult = new UpdateResult(null, null, false, t, null); - } - if (updateResult.success()) { - if (updateResult.writeResult != null) { - location = locationToSync(location, updateResult.writeResult.getLocation()); - } - switch (updateResult.result.getResponseResult()) { - case CREATED: - case UPDATED: - @SuppressWarnings("unchecked") - WriteResult result = updateResult.writeResult; - IndexRequest indexRequest = updateResult.request(); - BytesReference indexSourceAsBytes = indexRequest.source(); - // add the response - IndexResponse indexResponse = result.getResponse(); - UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult()); - if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) || - (updateRequest.fields() != null && updateRequest.fields().length > 0)) { - Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); - updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); - } - item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse)); - break; - case DELETED: - @SuppressWarnings("unchecked") - WriteResult writeResult = updateResult.writeResult; - DeleteResponse response = writeResult.getResponse(); - DeleteRequest deleteRequest = updateResult.request(); - updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); - updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null)); - // Replace the update request to the translated delete request to execute on the replica. - item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse)); - break; - case NOOP: - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResult.noopResult)); - item.setIgnoreOnReplica(); // no need to go to the replica - break; - default: - throw new IllegalStateException("Illegal operation " + updateResult.result.getResponseResult()); - } - // NOTE: Breaking out of the retry_on_conflict loop! 
- break; - } else if (updateResult.failure()) { - Throwable e = updateResult.error; - if (updateResult.retry) { - // updateAttemptCount is 0 based and marks current attempt, if it's equal to retryOnConflict we are going out of the iteration - if (updateAttemptsCount >= updateRequest.retryOnConflict()) { - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, - new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); - } - } else { - // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(e)) { - // restore updated versions... - for (int j = 0; j < requestIndex; j++) { - applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]); - } - throw (ElasticsearchException) e; - } - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (item.getPrimaryResponse() != null && isConflictException(e)) { - setResponse(item, item.getPrimaryResponse()); - } else if (updateResult.result == null) { - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); - } else { - switch (updateResult.result.getResponseResult()) { - case CREATED: - case UPDATED: - IndexRequest indexRequest = updateResult.request(); - logFailure(e, "index", request.shardId(), indexRequest); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, - new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e))); - break; - case DELETED: - DeleteRequest deleteRequest = updateResult.request(); - logFailure(e, "delete", request.shardId(), deleteRequest); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, - new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e))); - break; - default: - throw new IllegalStateException("Illegal operation " + updateResult.result.getResponseResult()); + private WriteResult innerExecuteBulkItemRequest(IndexMetaData metaData, IndexShard indexShard, + BulkShardRequest request, int requestIndex) throws Exception { + DocWriteRequest itemRequest = request.items()[requestIndex].request(); + switch (itemRequest.opType()) { + case CREATE: + case INDEX: + return TransportIndexAction.executeIndexRequestOnPrimary(((IndexRequest) itemRequest), indexShard, mappingUpdatedAction); + case UPDATE: + int maxAttempts = ((UpdateRequest) itemRequest).retryOnConflict(); + for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) { + try { + return shardUpdateOperation(metaData, indexShard, request, requestIndex, ((UpdateRequest) itemRequest)); + } catch (Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + if (attemptCount == maxAttempts // bubble up exception when we run out of attempts + || (cause instanceof VersionConflictEngineException) == false) { // or when exception is not a version conflict + throw e; } } - // NOTE: Breaking out of the retry_on_conflict loop! 
- break; } - - } + throw new IllegalStateException("version conflict exception should bubble up on last attempt"); + case DELETE: + return TransportDeleteAction.executeDeleteRequestOnPrimary(((DeleteRequest) itemRequest), indexShard); + default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); } - return Tuple.tuple(location, item); } private void setResponse(BulkItemRequest request, BulkItemResponse response) { @@ -338,105 +214,49 @@ public class TransportShardBulkAction extends TransportWriteAction shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, IndexMetaData metaData, - IndexShard indexShard, boolean processed) throws Exception { - - MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); - if (!processed) { - indexRequest.process(mappingMd, allowIdGeneration, request.index()); - } - return TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); - } - - static class UpdateResult { - - final UpdateHelper.Result result; - final ActionRequest actionRequest; - final boolean retry; - final Throwable error; - final WriteResult writeResult; - final UpdateResponse noopResult; - - UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, boolean retry, Throwable error, WriteResult writeResult) { - this.result = result; - this.actionRequest = actionRequest; - this.retry = retry; - this.error = error; - this.writeResult = writeResult; - this.noopResult = null; - } - - UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, WriteResult writeResult) { - this.result = result; - this.actionRequest = actionRequest; - this.writeResult = writeResult; - this.retry = false; - this.error = null; - this.noopResult = null; - } - - public UpdateResult(UpdateHelper.Result result, UpdateResponse updateResponse) { - this.result = result; - this.noopResult = updateResponse; - this.actionRequest = null; - this.writeResult = null; - this.retry = false; - this.error = null; - } - - - boolean failure() { - return error != null; - } - - boolean success() { - return noopResult != null || writeResult != null; - } - - @SuppressWarnings("unchecked") - T request() { - return (T) actionRequest; - } - - - } - - private UpdateResult shardUpdateOperation(IndexMetaData metaData, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard) { + /** + * Executes update request, doing a get and translating update to a index or delete operation + * NOTE: all operations except NOOP, reassigns the bulk item request + */ + private WriteResult shardUpdateOperation(IndexMetaData metaData, IndexShard indexShard, + BulkShardRequest request, + int requestIndex, UpdateRequest updateRequest) + throws Exception { + // Todo: capture read version conflicts, missing documents and malformed script errors in the write result due to get request UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard, threadPool::estimatedTimeInMillis); switch (translate.getResponseResult()) { case CREATED: case UPDATED: IndexRequest indexRequest = translate.action(); - try { - WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, metaData, indexShard, false); - return new UpdateResult(translate, indexRequest, result); - } catch (Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - boolean retry = false; - if (cause instanceof VersionConflictEngineException) { - retry = true; - } - return new UpdateResult(translate, 
indexRequest, retry, cause, null); + MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); + indexRequest.process(mappingMd, allowIdGeneration, request.index()); + WriteResult writeResult = TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); + BytesReference indexSourceAsBytes = indexRequest.source(); + IndexResponse indexResponse = writeResult.getResponse(); + UpdateResponse update = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult()); + if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) || + (updateRequest.fields() != null && updateRequest.fields().length > 0)) { + Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); + update.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); } + // Replace the update request to the translated index request to execute on the replica. + request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest); + return new WriteResult<>(update, writeResult.getLocation()); case DELETED: DeleteRequest deleteRequest = translate.action(); - try { - WriteResult result = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - return new UpdateResult(translate, deleteRequest, result); - } catch (Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - boolean retry = false; - if (cause instanceof VersionConflictEngineException) { - retry = true; - } - return new UpdateResult(translate, deleteRequest, retry, cause, null); - } + WriteResult deleteResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); + DeleteResponse response = deleteResult.getResponse(); + UpdateResponse deleteUpdateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); + deleteUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), translate.updatedSourceAsMap(), translate.updateSourceContentType(), null)); + // Replace the update request to the translated delete request to execute on the replica. 
+ request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest); + return new WriteResult<>(deleteUpdateResponse, deleteResult.getLocation()); case NOOP: - UpdateResponse updateResponse = translate.action(); + BulkItemRequest item = request.items()[requestIndex]; indexShard.noopUpdate(updateRequest.type()); - return new UpdateResult(translate, updateResponse); - default: - throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); + item.setIgnoreOnReplica(); // no need to go to the replica + return new WriteResult<>(translate.action(), null); + default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); } } @@ -448,50 +268,32 @@ public class TransportShardBulkAction extends TransportWriteAction implements DocumentRequest { +public class DeleteRequest extends ReplicatedWriteRequest implements DocWriteRequest { private String type; private String id; @@ -164,28 +164,33 @@ public class DeleteRequest extends ReplicatedWriteRequest impleme return this.routing; } - /** - * Sets the version, which will cause the delete operation to only be performed if a matching - * version exists and no changes happened on the doc since then. - */ + @Override public DeleteRequest version(long version) { this.version = version; return this; } + @Override public long version() { return this.version; } + @Override public DeleteRequest versionType(VersionType versionType) { this.versionType = versionType; return this; } + @Override public VersionType versionType() { return this.versionType; } + @Override + public OpType opType() { + return OpType.DELETE; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java index fef1b307e99..5d8ca27657f 100644 --- a/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java +++ b/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.internal.AliasFilter; import java.io.IOException; @@ -43,7 +44,7 @@ public class ExplainRequest extends SingleShardRequest { private String[] storedFields; private FetchSourceContext fetchSourceContext; - private String[] filteringAlias = Strings.EMPTY_ARRAY; + private AliasFilter filteringAlias = new AliasFilter(null, Strings.EMPTY_ARRAY); long nowInMillis; @@ -131,11 +132,11 @@ public class ExplainRequest extends SingleShardRequest { return this; } - public String[] filteringAlias() { + public AliasFilter filteringAlias() { return filteringAlias; } - public ExplainRequest filteringAlias(String[] filteringAlias) { + public ExplainRequest filteringAlias(AliasFilter filteringAlias) { if (filteringAlias != null) { this.filteringAlias = filteringAlias; } @@ -166,7 +167,7 @@ public class ExplainRequest extends SingleShardRequest { routing = in.readOptionalString(); preference = in.readOptionalString(); query = in.readNamedWriteable(QueryBuilder.class); - filteringAlias = in.readStringArray(); + filteringAlias = new AliasFilter(in); storedFields = in.readOptionalStringArray(); fetchSourceContext = 
in.readOptionalWriteable(FetchSourceContext::new); nowInMillis = in.readVLong(); @@ -180,7 +181,7 @@ public class ExplainRequest extends SingleShardRequest { out.writeOptionalString(routing); out.writeOptionalString(preference); out.writeNamedWriteable(query); - out.writeStringArray(filteringAlias); + filteringAlias.writeTo(out); out.writeOptionalStringArray(storedFields); out.writeOptionalWriteable(fetchSourceContext); out.writeVLong(nowInMillis); diff --git a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index dde4e3f42ad..65176c1df39 100644 --- a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.search.rescore.RescoreSearchContext; @@ -78,7 +79,9 @@ public class TransportExplainAction extends TransportSingleShardAction implements DocumentRequest { - - /** - * Operation type controls if the type of the index operation. - */ - public enum OpType { - /** - * Index the source. If there an existing document with the id, it will - * be replaced. - */ - INDEX((byte) 0), - /** - * Creates the resource. Simply adds it to the index, if there is an existing - * document with the id, then it won't be removed. - */ - CREATE((byte) 1); - - private final byte id; - private final String lowercase; - - OpType(byte id) { - this.id = id; - this.lowercase = this.toString().toLowerCase(Locale.ENGLISH); - } - - /** - * The internal representation of the operation type. - */ - public byte id() { - return id; - } - - public String lowercase() { - return this.lowercase; - } - - /** - * Constructs the operation type from its internal representation. - */ - public static OpType fromId(byte id) { - if (id == 0) { - return INDEX; - } else if (id == 1) { - return CREATE; - } else { - throw new IllegalArgumentException("No type match for [" + id + "]"); - } - } - - public static OpType fromString(String sOpType) { - String lowersOpType = sOpType.toLowerCase(Locale.ROOT); - switch (lowersOpType) { - case "create": - return OpType.CREATE; - case "index": - return OpType.INDEX; - default: - throw new IllegalArgumentException("opType [" + sOpType + "] not allowed, either [index] or [create] are allowed"); - } - } - - } +public class IndexRequest extends ReplicatedWriteRequest implements DocWriteRequest { private String type; private String id; @@ -526,6 +465,9 @@ public class IndexRequest extends ReplicatedWriteRequest implement * Sets the type of operation to perform. */ public IndexRequest opType(OpType opType) { + if (opType != OpType.CREATE && opType != OpType.INDEX) { + throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]"); + } this.opType = opType; if (opType == OpType.CREATE) { version(Versions.MATCH_DELETED); @@ -535,11 +477,19 @@ public class IndexRequest extends ReplicatedWriteRequest implement } /** - * Sets a string representation of the {@link #opType(org.elasticsearch.action.index.IndexRequest.OpType)}. 
Can + * Sets a string representation of the {@link #opType(OpType)}. Can * be either "index" or "create". */ public IndexRequest opType(String opType) { - return opType(OpType.fromString(opType)); + String op = opType.toLowerCase(Locale.ROOT); + if (op.equals("create")) { + opType(OpType.CREATE); + } else if (op.equals("index")) { + opType(OpType.INDEX); + } else { + throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]"); + } + return this; } @@ -554,34 +504,29 @@ public class IndexRequest extends ReplicatedWriteRequest implement } } - /** - * The type of operation to perform. - */ + @Override public OpType opType() { return this.opType; } - /** - * Sets the version, which will cause the index operation to only be performed if a matching - * version exists and no changes happened on the doc since then. - */ + @Override public IndexRequest version(long version) { this.version = version; return this; } + @Override public long version() { return this.version; } - /** - * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}. - */ + @Override public IndexRequest versionType(VersionType versionType) { this.versionType = versionType; return this; } + @Override public VersionType versionType() { return this.versionType; } @@ -673,7 +618,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement out.writeOptionalString(timestamp); out.writeOptionalWriteable(ttl); out.writeBytesReference(source); - out.writeByte(opType.id()); + out.writeByte(opType.getId()); out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index c4609e03aa5..310ef3fb928 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.index; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -200,7 +201,7 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder> { + static final class BulkRequestModifier implements Iterator { final BulkRequest bulkRequest; final Set failedSlots; @@ -150,7 +151,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio } @Override - public ActionRequest next() { + public DocWriteRequest next() { return bulkRequest.requests().get(++currentSlot); } @@ -171,7 +172,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio int slot = 0; originalSlots = new int[bulkRequest.requests().size() - failedSlots.size()]; for (int i = 0; i < bulkRequest.requests().size(); i++) { - ActionRequest request = bulkRequest.requests().get(i); + DocWriteRequest request = bulkRequest.requests().get(i); if (failedSlots.contains(i) == false) { modifiedBulkRequest.add(request); originalSlots[slot++] = i; @@ -207,7 +208,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio // 3) Continue with the next request in the bulk. 
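The hunks above fold IndexRequest's private OpType enum into the shared DocWriteRequest.OpType and tighten opType(String) to accept only "create" and "index". A minimal self-contained sketch of the resulting shape — simplified, not the actual Elasticsearch class; the byte ids for UPDATE and DELETE are illustrative assumptions, and OpTypeDemo is a hypothetical name:

```java
import java.util.Locale;

public class OpTypeDemo {
    // Simplified stand-in for DocWriteRequest.OpType: one shared enum with a
    // stable byte id for wire serialization (opType.getId() in the hunks above).
    // The ids for UPDATE/DELETE below are illustrative assumptions.
    enum OpType {
        INDEX((byte) 0), CREATE((byte) 1), UPDATE((byte) 2), DELETE((byte) 3);

        private final byte id;
        OpType(byte id) { this.id = id; }
        byte getId() { return id; }

        static OpType fromId(byte id) {
            switch (id) {
                case 0: return INDEX;
                case 1: return CREATE;
                case 2: return UPDATE;
                case 3: return DELETE;
                default: throw new IllegalArgumentException("unknown opType id [" + id + "]");
            }
        }
    }

    // Mirrors the stricter IndexRequest#opType(String) logic: only "create"
    // and "index" are valid for an index request; "update"/"delete" are rejected.
    static OpType parseIndexOpType(String opType) {
        String op = opType.toLowerCase(Locale.ROOT);
        if (op.equals("create")) {
            return OpType.CREATE;
        } else if (op.equals("index")) {
            return OpType.INDEX;
        }
        throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]");
    }

    public static void main(String[] args) {
        System.out.println(parseIndexOpType("CREATE"));           // CREATE (case-insensitive)
        System.out.println(OpType.fromId(OpType.DELETE.getId())); // DELETE, id round-trip
    }
}
```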
failedSlots.add(currentSlot); BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e); - itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType().lowercase(), failure)); + itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType(), failure)); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java index 3ce14d8dacd..96db19d5472 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java @@ -26,8 +26,10 @@ abstract class AbstractAsyncAction { private final long startTime; - protected AbstractAsyncAction() { - this.startTime = System.currentTimeMillis(); + protected AbstractAsyncAction() { this(System.currentTimeMillis());} + + protected AbstractAsyncAction(long startTime) { + this.startTime = startTime; } /** diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 6cb68b8e9be..bf6d34c93fb 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -27,104 +27,71 @@ import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; -import org.elasticsearch.threadpool.ThreadPool; import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; -import static org.elasticsearch.action.search.TransportSearchHelper.internalSearchRequest; abstract class AbstractSearchAsyncAction extends AbstractAsyncAction { protected final Logger logger; protected final SearchTransportService searchTransportService; - private final IndexNameExpressionResolver indexNameExpressionResolver; - protected final SearchPhaseController searchPhaseController; - protected final ThreadPool threadPool; + private final Executor executor; protected final ActionListener listener; - protected final 
GroupShardsIterator shardsIts; + private final GroupShardsIterator shardsIts; protected final SearchRequest request; - protected final ClusterState clusterState; - protected final DiscoveryNodes nodes; + /** Used by subclasses to resolve node ids to DiscoveryNodes. **/ + protected final Function nodeIdToDiscoveryNode; protected final int expectedSuccessfulOps; private final int expectedTotalOps; protected final AtomicInteger successfulOps = new AtomicInteger(); private final AtomicInteger totalOps = new AtomicInteger(); protected final AtomicArray firstResults; + private final Map aliasFilter; + private final long clusterStateVersion; private volatile AtomicArray shardFailures; private final Object shardFailuresMutex = new Object(); protected volatile ScoreDoc[] sortedShardDocs; - protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService, - IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request, - ActionListener listener) { + protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, + Function nodeIdToDiscoveryNode, + Map aliasFilter, Executor executor, SearchRequest request, + ActionListener listener, GroupShardsIterator shardsIts, long startTime, + long clusterStateVersion) { + super(startTime); this.logger = logger; this.searchTransportService = searchTransportService; - this.indexNameExpressionResolver = indexNameExpressionResolver; - this.searchPhaseController = searchPhaseController; - this.threadPool = threadPool; + this.executor = executor; this.request = request; this.listener = listener; - - this.clusterState = clusterService.state(); - nodes = clusterState.nodes(); - - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); - - // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name - // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead - // of just for the _search api - String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request.indicesOptions(), - startTime(), request.indices()); - - for (String index : concreteIndices) { - clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index); - } - - Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), - request.indices()); - - shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); - final int shardCount = shardsIts.size(); - failIfOverShardCountLimit(clusterService, shardCount); - expectedSuccessfulOps = shardCount; + this.nodeIdToDiscoveryNode = nodeIdToDiscoveryNode; + this.clusterStateVersion = clusterStateVersion; + this.shardsIts = shardsIts; + expectedSuccessfulOps = shardsIts.size(); // we need to add 1 for non active partition, since we count it in the total! 
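The constructor rewrite above is the heart of this change: the async action now receives a node-lookup Function, a pre-resolved alias-filter map, and an Executor instead of reaching into ClusterService and ThreadPool. A toy sketch of why a Function is a sufficient (and easily testable) seam — String stands in for DiscoveryNode, and all names here are illustrative:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class NodeLookupDemo {
    public static void main(String[] args) {
        // Stand-in for DiscoveryNodes: the action only ever needs id -> node
        // resolution, so any mapping satisfies the dependency.
        Map<String, String> nodes = new HashMap<>();
        nodes.put("node_1", "DiscoveryNode{node_1}");

        // In the real code this is captured as state.nodes()::get from a single
        // ClusterState snapshot, keeping the whole search on one consistent view.
        Function<String, String> nodeIdToDiscoveryNode = nodes::get;

        System.out.println(nodeIdToDiscoveryNode.apply("node_1")); // resolves
        // A null result is what the action turns into NoShardAvailableActionException.
        System.out.println(nodeIdToDiscoveryNode.apply("node_2")); // null
    }
}
```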
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); - firstResults = new AtomicArray<>(shardsIts.size()); + this.aliasFilter = aliasFilter; } - private void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) { - final long shardCountLimit = clusterService.getClusterSettings().get(TransportSearchAction.SHARD_COUNT_LIMIT_SETTING); - if (shardCount > shardCountLimit) { - throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of " - + shardCountLimit + ". This limit exists because querying many shards at the same time can make the " - + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to " - + "have a smaller number of larger shards. Update [" + TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey() - + "] to a greater value if you really want to query that many shards at the same time."); - } - } + public void start() { if (expectedSuccessfulOps == 0) { @@ -152,14 +119,14 @@ abstract class AbstractSearchAsyncAction // no more active shards... (we should not really get here, but just for safety) onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); } else { - final DiscoveryNode node = nodes.get(shard.currentNodeId()); + final DiscoveryNode node = nodeIdToDiscoveryNode.apply(shard.currentNodeId()); if (node == null) { onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); } else { - String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState, - shard.index().getName(), request.indices()); - sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, - startTime()), new ActionListener() { + AliasFilter filter = this.aliasFilter.get(shard.index().getName()); + ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shard, shardsIts.size(), + filter, startTime()); + sendExecuteFirstPhase(node, transportRequest , new ActionListener() { @Override public void onResponse(FirstResult result) { onFirstPhaseResult(shardIndex, shard, result, shardIt); @@ -319,7 +286,7 @@ abstract class AbstractSearchAsyncAction private void raiseEarlyFailure(Exception e) { for (AtomicArray.Entry entry : firstResults.asList()) { try { - DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.shardTarget().nodeId()); sendReleaseSearchContext(entry.value.id(), node); } catch (Exception inner) { inner.addSuppressed(e); @@ -344,7 +311,7 @@ abstract class AbstractSearchAsyncAction if (queryResult.hasHits() && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs try { - DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.queryResult().shardTarget().nodeId()); sendReleaseSearchContext(entry.value.queryResult().id(), node); } catch (Exception e) { logger.trace("failed to release context", e); @@ -402,7 +369,7 @@ abstract class AbstractSearchAsyncAction sb.append(result.shardTarget()); } - logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version()); + logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterStateVersion); } moveToSecondPhase(); } @@ -410,4 +377,9 @@ abstract class 
AbstractSearchAsyncAction protected abstract void moveToSecondPhase() throws Exception; protected abstract String firstPhaseName(); + + protected Executor getExecutor() { + return executor; + } + } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index ba73b0f4bea..24b1033ca5f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -24,31 +24,35 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction { private final AtomicArray queryFetchResults; - + private final SearchPhaseController searchPhaseController; SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, - request, listener); + Function nodeIdToDiscoveryNode, + Map aliasFilter, SearchPhaseController searchPhaseController, + Executor executor, SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, + request, listener, shardsIts, startTime, clusterStateVersion); + this.searchPhaseController = searchPhaseController; queryFetchResults = new AtomicArray<>(firstResults.length()); } @@ -70,7 +74,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction entry : firstResults.asList()) { DfsSearchResult dfsResult = entry.value; - DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId()); QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest); } @@ -115,7 +119,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction(listener) { + 
getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index ccd646ae129..1af6d4da4d1 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -26,36 +26,41 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { final AtomicArray queryResults; final AtomicArray fetchResults; final AtomicArray docIdsToLoad; + private final SearchPhaseController searchPhaseController; SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, - request, listener); + Function nodeIdToDiscoveryNode, + Map aliasFilter, SearchPhaseController searchPhaseController, + Executor executor, SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, + request, listener, shardsIts, startTime, clusterStateVersion); + this.searchPhaseController = searchPhaseController; queryResults = new AtomicArray<>(firstResults.length()); fetchResults = new AtomicArray<>(firstResults.length()); docIdsToLoad = new AtomicArray<>(firstResults.length()); @@ -78,7 +83,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : firstResults.asList()) { DfsSearchResult dfsResult = entry.value; - DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId()); 
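Each of these second phases relies on the same AtomicInteger countdown idiom: every per-shard listener decrements the counter, and the one that reaches zero drives the phase transition. A toy, thread-safe sketch of the idiom — illustrative only, not the actual action code:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class PhaseCountdownDemo {
    public static void main(String[] args) throws InterruptedException {
        int shards = 3;
        AtomicInteger counter = new AtomicInteger(shards);
        CountDownLatch done = new CountDownLatch(1);

        // Stand-in for the per-shard ActionListener#onResponse callback.
        Runnable onShardResponse = () -> {
            // decrementAndGet is atomic, so exactly one callback observes zero
            // and moves the search to its next phase, even under concurrency.
            if (counter.decrementAndGet() == 0) {
                System.out.println("all shards responded, moving to next phase");
                done.countDown();
            }
        };

        for (int i = 0; i < shards; i++) {
            new Thread(onShardResponse).start();
        }
        done.await(); // wait for the simulated phase transition
    }
}
```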
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); executeQuery(entry.index, dfsResult, counter, querySearchRequest, node); } @@ -149,7 +154,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : docIdsToLoad.asList()) { QuerySearchResult queryResult = queryResults.get(entry.index); - DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId()); ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard); executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } @@ -192,7 +197,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { final boolean isScrollRequest = request.scroll() != null; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index d799bc26764..4e8c3847ffc 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -22,24 +22,32 @@ package org.elasticsearch.action.search; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; +import java.util.function.Function; class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction { + private final SearchPhaseController searchPhaseController; + SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, - request, listener); + Function nodeIdToDiscoveryNode, + Map aliasFilter, + SearchPhaseController searchPhaseController, Executor executor, + SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, + request, listener, shardsIts, startTime, clusterStateVersion); + this.searchPhaseController = searchPhaseController; + } @Override @@ -55,7 +63,7 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { final boolean 
isScrollRequest = request.scroll() != null; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 6df2bb3f87e..0bcae7502ee 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -26,31 +26,38 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResultProvider; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { final AtomicArray fetchResults; final AtomicArray docIdsToLoad; + private final SearchPhaseController searchPhaseController; - SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener); + SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, + Function nodeIdToDiscoveryNode, Map aliasFilter, + SearchPhaseController searchPhaseController, Executor executor, + SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, request, listener, + shardsIts, startTime, clusterStateVersion); + this.searchPhaseController = searchPhaseController; fetchResults = new AtomicArray<>(firstResults.length()); docIdsToLoad = new AtomicArray<>(firstResults.length()); } @@ -82,7 +89,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : docIdsToLoad.asList()) { QuerySearchResultProvider queryResult = firstResults.get(entry.index); - DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId()); ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard); executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } @@ -125,7 +132,7 @@ class SearchQueryThenFetchAsyncAction extends 
AbstractSearchAsyncAction(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { final boolean isScrollRequest = request.scroll() != null; diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index efd04035276..2bceccce385 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -51,7 +51,7 @@ public class TransportMultiSearchAction extends HandledTransportAction buildPerIndexAliasFilter(SearchRequest request, ClusterState clusterState, String...concreteIndices) { + final Map aliasFilterMap = new HashMap<>(); + for (String index : concreteIndices) { + clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index); + AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, index, request.indices()); + if (aliasFilter != null) { + aliasFilterMap.put(index, aliasFilter); + } + } + return aliasFilterMap; } @Override protected void doExecute(SearchRequest searchRequest, ActionListener listener) { + // pure paranoia if time goes backwards we are at least positive + final long startTimeInMillis = Math.max(0, System.currentTimeMillis()); + ClusterState clusterState = clusterService.state(); + clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); + + // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name + // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead + // of just for the _search api + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest.indicesOptions(), + startTimeInMillis, searchRequest.indices()); + Map aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, concreteIndices); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), + searchRequest.indices()); + GroupShardsIterator shardIterators = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, + searchRequest.preference()); + failIfOverShardCountLimit(clusterService, shardIterators.size()); + // optimize search type for cases where there is only one shard group to search on try { - ClusterState clusterState = clusterService.state(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest); - Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, - searchRequest.routing(), searchRequest.indices()); - int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap); - if (shardCount == 1) { + if (shardIterators.size() == 1) { // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard searchRequest.searchType(QUERY_AND_FETCH); } @@ -95,27 +129,37 @@ public class TransportSearchAction extends HandledTransportAction listener) { + private AbstractSearchAsyncAction searchAsyncAction(SearchRequest searchRequest, GroupShardsIterator shardIterators, long startTime, + ClusterState state, Map aliasFilter, + ActionListener listener) { + final Function nodesLookup = state.nodes()::get; + final long clusterStateVersion = state.version(); + Executor 
executor = threadPool.executor(ThreadPool.Names.SEARCH); AbstractSearchAsyncAction searchAsyncAction; switch(searchRequest.searchType()) { case DFS_QUERY_THEN_FETCH: - searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup, + aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion); break; case QUERY_THEN_FETCH: - searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup, + aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion); break; case DFS_QUERY_AND_FETCH: - searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup, + aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion); break; case QUERY_AND_FETCH: - searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup, + aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion); break; default: throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]"); @@ -123,4 +167,15 @@ public class TransportSearchAction extends HandledTransportAction shardCountLimit) { + throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of " + + shardCountLimit + ". This limit exists because querying many shards at the same time can make the " + + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to " + + "have a smaller number of larger shards. 
Update [" + SHARD_COUNT_LIMIT_SETTING.getKey() + + "] to a greater value if you really want to query that many shards at the same time."); + } + } + } diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index dcccf7e735f..a09a651086b 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -32,11 +32,6 @@ import java.util.Base64; final class TransportSearchHelper { - static ShardSearchTransportRequest internalSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchRequest request, - String[] filteringAliases, long nowInMillis) { - return new ShardSearchTransportRequest(request, shardRouting, numberOfShards, filteringAliases, nowInMillis); - } - static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) { return new InternalScrollSearchRequest(request, id); } diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index f3b3d8d9839..c48fa1e8122 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -44,6 +44,7 @@ import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.Supplier; @@ -83,9 +84,9 @@ public abstract class TransportBroadcastActionvint: frequency (always returned) *
  • *
      - *
    • vint: position_1 (if positions == true)
    • - *
    • vint: startOffset_1 (if offset == true)
    • - *
    • vint: endOffset_1 (if offset == true)
    • - *
    • BytesRef: payload_1 (if payloads == true)
    • + *
    • vint: position_1 (if positions)
    • + *
    • vint: startOffset_1 (if offset)
    • + *
    • vint: endOffset_1 (if offset)
    • + *
    • BytesRef: payload_1 (if payloads)
    • *
    • ...
    • - *
    • vint: endOffset_freqency (if offset == true)
    • - *
    • BytesRef: payload_freqency (if payloads == true)
    • + *
    • vint: endOffset_freqency (if offset)
    • + *
    • BytesRef: payload_freqency (if payloads)
    • *
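The javadoc cleaned up above documents the term-vector wire layout: a vint frequency per term, then per-occurrence vints gated on the positions/offsets/payloads flags. A toy encoder for that layout, where writeVInt is a simplified LEB128-style stand-in for Lucene's variable-length int and the class, flags, and values are all illustrative:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class TermVectorWireSketch {

    // Simplified LEB128-style vint, standing in for Lucene's writeVInt.
    static void writeVInt(DataOutputStream out, int v) throws IOException {
        while ((v & ~0x7F) != 0) {
            out.writeByte((v & 0x7F) | 0x80); // low 7 bits, continuation bit set
            v >>>= 7;
        }
        out.writeByte(v);
    }

    public static void main(String[] args) throws IOException {
        boolean positions = true;
        boolean offsets = false;
        int[] termPositions = {4, 11}; // the term occurs twice

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);

        writeVInt(out, termPositions.length); // vint: frequency (always written)
        for (int pos : termPositions) {
            if (positions) {
                writeVInt(out, pos); // vint: position_i (if positions)
            }
            // startOffset_i / endOffset_i (if offsets) and BytesRef payload_i
            // (if payloads) would follow here, per the layout described above.
        }
        out.flush();
        System.out.println("encoded " + bytes.size() + " bytes");
    }
}
```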
diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index a660ede0ba8..b83713e3a6a 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RealtimeRequest; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.get.MultiGetRequest; @@ -56,7 +55,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; * Note, the {@link #index()}, {@link #type(String)} and {@link #id(String)} are * required. */ -public class TermVectorsRequest extends SingleShardRequest implements DocumentRequest, RealtimeRequest { +public class TermVectorsRequest extends SingleShardRequest implements RealtimeRequest { private String type; @@ -200,7 +199,6 @@ public class TermVectorsRequest extends SingleShardRequest i /** * Returns the type of document to get the term vector for. */ - @Override public String type() { return type; } @@ -208,7 +206,6 @@ public class TermVectorsRequest extends SingleShardRequest i /** * Returns the id of document the term vector is requested for. */ - @Override public String id() { return id; } @@ -250,18 +247,15 @@ public class TermVectorsRequest extends SingleShardRequest i /** * @return The routing for this request. */ - @Override public String routing() { return routing; } - @Override public TermVectorsRequest routing(String routing) { this.routing = routing; return this; } - @Override public String parent() { return parent; } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 11dabbd300a..14ef2058856 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteRequest; @@ -55,7 +55,7 @@ import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; public class UpdateRequest extends InstanceShardOperationRequest - implements DocumentRequest, WriteRequest { + implements DocWriteRequest, WriteRequest { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(UpdateRequest.class)); @@ -469,31 +469,33 @@ public class UpdateRequest extends InstanceShardOperationRequest return this.retryOnConflict; } - /** - * Sets the version, which will cause the index operation to only be performed if a matching - * version exists and no changes happened on the doc since then. - */ + @Override public UpdateRequest version(long version) { this.version = version; return this; } + @Override public long version() { return this.version; } - /** - * Sets the versioning type. 
Defaults to {@link VersionType#INTERNAL}. - */ + @Override public UpdateRequest versionType(VersionType versionType) { this.versionType = versionType; return this; } + @Override public VersionType versionType() { return this.versionType; } + @Override + public OpType opType() { + return OpType.UPDATE; + } + @Override public UpdateRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { this.refreshPolicy = refreshPolicy; diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java index 2d085df43b2..a0c834aa35f 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java @@ -308,7 +308,8 @@ final class BootstrapCheck { static class MaxNumberOfThreadsCheck implements Check { - private final long maxNumberOfThreadsThreshold = 1 << 11; + // this should be plenty for machines up to 256 cores + private final long maxNumberOfThreadsThreshold = 1 << 12; @Override public boolean check() { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 10a29963b63..94cb4b8c8e8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -68,11 +68,6 @@ public class OperationRouting extends AbstractComponent { return preferenceActiveShardIterator(indexShard, clusterState.nodes().getLocalNodeId(), clusterState.nodes(), preference); } - public int searchShardsCount(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing) { - final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); - return shards.size(); - } - public GroupShardsIterator searchShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing, @Nullable String preference) { final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); final Set set = new HashSet<>(shards.size()); diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index 43c1df588b1..9b78c2fe5a7 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -20,12 +20,6 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; -import java.io.OutputStream; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; public final class BytesArray extends BytesReference { diff --git a/core/src/main/java/org/elasticsearch/common/cache/Cache.java b/core/src/main/java/org/elasticsearch/common/cache/Cache.java index a42d01ccf72..cf8b58d0271 100644 --- a/core/src/main/java/org/elasticsearch/common/cache/Cache.java +++ b/core/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -67,13 +67,13 @@ import java.util.function.ToLongBiFunction; */ public class Cache { // positive if entries have an expiration - private long expireAfterAccess = -1; + private long expireAfterAccessNanos = -1; // true if entries can expire after access private boolean entriesExpireAfterAccess; // positive if entries have an expiration after write - private long expireAfterWrite = -1; + private long expireAfterWriteNanos = -1; // 
true if entries can expire after initial insertion private boolean entriesExpireAfterWrite; @@ -98,22 +98,32 @@ public class Cache { Cache() { } - void setExpireAfterAccess(long expireAfterAccess) { - if (expireAfterAccess <= 0) { - throw new IllegalArgumentException("expireAfterAccess <= 0"); + void setExpireAfterAccessNanos(long expireAfterAccessNanos) { + if (expireAfterAccessNanos <= 0) { + throw new IllegalArgumentException("expireAfterAccessNanos <= 0"); } - this.expireAfterAccess = expireAfterAccess; + this.expireAfterAccessNanos = expireAfterAccessNanos; this.entriesExpireAfterAccess = true; } - void setExpireAfterWrite(long expireAfterWrite) { - if (expireAfterWrite <= 0) { - throw new IllegalArgumentException("expireAfterWrite <= 0"); + // pkg-private for testing + long getExpireAfterAccessNanos() { + return this.expireAfterAccessNanos; + } + + void setExpireAfterWriteNanos(long expireAfterWriteNanos) { + if (expireAfterWriteNanos <= 0) { + throw new IllegalArgumentException("expireAfterWriteNanos <= 0"); } - this.expireAfterWrite = expireAfterWrite; + this.expireAfterWriteNanos = expireAfterWriteNanos; this.entriesExpireAfterWrite = true; } + // pkg-private for testing + long getExpireAfterWriteNanos() { + return this.expireAfterWriteNanos; + } + void setMaximumWeight(long maximumWeight) { if (maximumWeight < 0) { throw new IllegalArgumentException("maximumWeight < 0"); @@ -696,8 +706,8 @@ public class Cache { } private boolean isExpired(Entry entry, long now) { - return (entriesExpireAfterAccess && now - entry.accessTime > expireAfterAccess) || - (entriesExpireAfterWrite && now - entry.writeTime > expireAfterWrite); + return (entriesExpireAfterAccess && now - entry.accessTime > expireAfterAccessNanos) || + (entriesExpireAfterWrite && now - entry.writeTime > expireAfterWriteNanos); } private boolean unlink(Entry entry) { diff --git a/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java b/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java index ffb0e591180..67c8d508ba5 100644 --- a/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java @@ -19,13 +19,15 @@ package org.elasticsearch.common.cache; +import org.elasticsearch.common.unit.TimeValue; + import java.util.Objects; import java.util.function.ToLongBiFunction; public class CacheBuilder { private long maximumWeight = -1; - private long expireAfterAccess = -1; - private long expireAfterWrite = -1; + private long expireAfterAccessNanos = -1; + private long expireAfterWriteNanos = -1; private ToLongBiFunction weigher; private RemovalListener removalListener; @@ -44,19 +46,35 @@ public class CacheBuilder { return this; } - public CacheBuilder setExpireAfterAccess(long expireAfterAccess) { - if (expireAfterAccess <= 0) { + /** + * Sets the amount of time before an entry in the cache expires after it was last accessed. + * + * @param expireAfterAccess The amount of time before an entry expires after it was last accessed. Must not be {@code null} and must + * be greater than 0. 
+ */ + public CacheBuilder setExpireAfterAccess(TimeValue expireAfterAccess) { + Objects.requireNonNull(expireAfterAccess); + final long expireAfterAccessNanos = expireAfterAccess.getNanos(); + if (expireAfterAccessNanos <= 0) { throw new IllegalArgumentException("expireAfterAccess <= 0"); } - this.expireAfterAccess = expireAfterAccess; + this.expireAfterAccessNanos = expireAfterAccessNanos; return this; } - public CacheBuilder setExpireAfterWrite(long expireAfterWrite) { - if (expireAfterWrite <= 0) { + /** + * Sets the amount of time before an entry in the cache expires after it was written. + * + * @param expireAfterWrite The amount of time before an entry expires after it was written. Must not be {@code null} and must be + * greater than 0. + */ + public CacheBuilder setExpireAfterWrite(TimeValue expireAfterWrite) { + Objects.requireNonNull(expireAfterWrite); + final long expireAfterWriteNanos = expireAfterWrite.getNanos(); + if (expireAfterWriteNanos <= 0) { throw new IllegalArgumentException("expireAfterWrite <= 0"); } - this.expireAfterWrite = expireAfterWrite; + this.expireAfterWriteNanos = expireAfterWriteNanos; return this; } @@ -77,11 +95,11 @@ public class CacheBuilder { if (maximumWeight != -1) { cache.setMaximumWeight(maximumWeight); } - if (expireAfterAccess != -1) { - cache.setExpireAfterAccess(expireAfterAccess); + if (expireAfterAccessNanos != -1) { + cache.setExpireAfterAccessNanos(expireAfterAccessNanos); } - if (expireAfterWrite != -1) { - cache.setExpireAfterWrite(expireAfterWrite); + if (expireAfterWriteNanos != -1) { + cache.setExpireAfterWriteNanos(expireAfterWriteNanos); } if (weigher != null) { cache.setWeigher(weigher); diff --git a/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java b/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java index aef5db31e2d..f567264d26e 100644 --- a/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java +++ b/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java @@ -80,4 +80,11 @@ public class PortsRange { public interface PortCallback { boolean onPortNumber(int portNumber); } + + @Override + public String toString() { + return "PortsRange{" + + "portRange='" + portRange + '\'' + + '}'; + } } diff --git a/core/src/main/java/org/elasticsearch/common/util/BigArrays.java b/core/src/main/java/org/elasticsearch/common/util/BigArrays.java index 6a15a3d9000..728db17c2a4 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -397,7 +397,7 @@ public class BigArrays implements Releasable { void adjustBreaker(long delta) { if (this.breakerService != null) { CircuitBreaker breaker = this.breakerService.getBreaker(CircuitBreaker.REQUEST); - if (this.checkBreaker == true) { + if (this.checkBreaker) { // checking breaker means potentially tripping, but it doesn't // have to if the delta is negative if (delta > 0) { diff --git a/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java b/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java index 947aad48737..f40938b8ec0 100644 --- a/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java +++ b/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java @@ -69,7 +69,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable { super(settings); final Type type = TYPE_SETTING .get(settings); final long limit = LIMIT_HEAP_SETTING .get(settings).getBytes(); - 
final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); + final int availableProcessors = EsExecutors.numberOfProcessors(settings); // We have a global amount of memory that we need to divide across data types. // Since some types are more useful than other ones we give them different weights. diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 2d682648ca4..fbb9f65414a 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -40,16 +40,17 @@ public class EsExecutors { * This is used to adjust thread pools sizes etc. per node. */ public static final Setting PROCESSORS_SETTING = - Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, Property.NodeScope); + Setting.intSetting("processors", Runtime.getRuntime().availableProcessors(), 1, Property.NodeScope); /** - * Returns the number of processors available but at most 32. + * Returns the number of available processors. Defaults to + * {@link Runtime#availableProcessors()} but can be overridden by passing a {@link Settings} + * instance with the key "processors" set to the desired value. + * + * @param settings a {@link Settings} instance from which to derive the available processors + * @return the number of available processors */ - public static int boundedNumberOfProcessors(Settings settings) { - /* This relates to issues where machines with large number of cores - * ie. >= 48 create too many threads and run into OOM see #3478 - * We just use an 32 core upper-bound here to not stress the system - * too much with too many created threads */ + public static int numberOfProcessors(final Settings settings) { return PROCESSORS_SETTING.get(settings); } diff --git a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index 42c40034b10..37277586bf7 100644 --- a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -124,7 +124,7 @@ public abstract class AsyncShardFetch implements Rel } // if we are still fetching, return null to indicate it - if (hasAnyNodeFetching(cache) == true) { + if (hasAnyNodeFetching(cache)) { return new FetchResult<>(shardId, null, emptySet(), emptySet()); } else { // nothing to fetch, yay, build the return value @@ -137,7 +137,7 @@ public abstract class AsyncShardFetch implements Rel DiscoveryNode node = nodes.get(nodeId); if (node != null) { - if (nodeEntry.isFailed() == true) { + if (nodeEntry.isFailed()) { // if its failed, remove it from the list of nodes, so if this run doesn't work // we try again next round to fetch it again it.remove(); @@ -361,7 +361,7 @@ public abstract class AsyncShardFetch implements Rel } void doneFetching(T value) { - assert fetching == true : "setting value but not in fetching mode"; + assert fetching : "setting value but not in fetching mode"; assert failure == null : "setting value when failure already set"; this.valueSet = true; this.value = value; @@ -369,7 +369,7 @@ public abstract class AsyncShardFetch implements Rel } void doneFetching(Throwable failure) { - assert fetching == true : "setting value but not in fetching mode"; + assert fetching : "setting value but not in fetching mode"; assert valueSet == false : 
"setting failure when already set value"; assert failure != null : "setting failure can't be null"; this.failure = failure; @@ -377,7 +377,7 @@ public abstract class AsyncShardFetch implements Rel } void restartFetching() { - assert fetching == true : "restarting fetching, but not in fetching mode"; + assert fetching : "restarting fetching, but not in fetching mode"; assert valueSet == false : "value can't be set when restarting fetching"; assert failure == null : "failure can't be set when restarting fetching"; this.fetching = false; @@ -388,7 +388,7 @@ public abstract class AsyncShardFetch implements Rel } boolean hasData() { - return valueSet == true || failure != null; + return valueSet || failure != null; } Throwable getFailure() { @@ -399,7 +399,7 @@ public abstract class AsyncShardFetch implements Rel @Nullable T getValue() { assert failure == null : "trying to fetch value, but its marked as failed, check isFailed"; - assert valueSet == true : "value is not set, hasn't been fetched yet"; + assert valueSet : "value is not set, hasn't been fetched yet"; return value; } } diff --git a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index 370778898fc..0c829e88182 100644 --- a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -153,7 +153,7 @@ public class DanglingIndicesState extends AbstractComponent { * for allocation. */ private void allocateDanglingIndices() { - if (danglingIndices.isEmpty() == true) { + if (danglingIndices.isEmpty()) { return; } try { diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index d75a864d8dd..65a2876b3aa 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -85,7 +85,7 @@ public class GatewayAllocator extends AbstractComponent { boolean cleanCache = false; DiscoveryNode localNode = event.state().nodes().getLocalNode(); if (localNode != null) { - if (localNode.isMasterNode() == true && event.localNodeMaster() == false) { + if (localNode.isMasterNode() && event.localNodeMaster() == false) { cleanCache = true; } } else { @@ -174,7 +174,7 @@ public class GatewayAllocator extends AbstractComponent { AsyncShardFetch.FetchResult shardState = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId())); - if (shardState.hasData() == true) { + if (shardState.hasData()) { shardState.processAllocation(allocation); } return shardState; @@ -199,7 +199,7 @@ public class GatewayAllocator extends AbstractComponent { } AsyncShardFetch.FetchResult shardStores = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId())); - if (shardStores.hasData() == true) { + if (shardStores.hasData()) { shardStores.processAllocation(allocation); } return shardStores; diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index a05e85299a8..b609d0bacae 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -192,7 +192,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL Set relevantIndices; if (isDataOnlyNode(state)) { relevantIndices = 
getRelevantIndicesOnDataOnlyNode(state, previousState, previouslyWrittenIndices); - } else if (state.nodes().getLocalNode().isMasterNode() == true) { + } else if (state.nodes().getLocalNode().isMasterNode()) { relevantIndices = getRelevantIndicesForMasterEligibleNode(state); } else { relevantIndices = Collections.emptySet(); diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 15acd625248..7d8e8327d39 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -195,7 +195,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { "the allocation deciders returned a YES decision to allocate to node [" + nodeId + "]", decidedNode.nodeShardState.allocationId(), buildNodeDecisions(nodesToAllocate, explain)); - } else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) { + } else if (nodesToAllocate.throttleNodeShards.isEmpty() && !nodesToAllocate.noNodeShards.isEmpty()) { // The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard // can be force-allocated to one of the nodes. final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate( diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index 620fd354327..390f3cb379e 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -65,7 +65,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { List shardCancellationActions = new ArrayList<>(); for (RoutingNode routingNode : routingNodes) { for (ShardRouting shard : routingNode) { - if (shard.primary() == true) { + if (shard.primary()) { continue; } if (shard.initializing() == false) { @@ -109,7 +109,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { } if (currentNode.equals(nodeWithHighestMatch) == false && Objects.equals(currentSyncId, primaryStore.syncId()) == false - && matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch) == true) { + && matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch)) { // we found a better match that has a full sync id match, the existing allocation is not fully synced // so we found a better one, cancel this one logger.debug("cancelling allocation of replica on [{}], sync id match found on node [{}]", diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 8101397a45c..ee1f1d1c976 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -22,25 +22,18 @@ package org.elasticsearch.index; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.Query; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.IOUtils; import org.elasticsearch.client.Client; -import 
org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; @@ -55,7 +48,6 @@ import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexSearcherWrapper; @@ -70,8 +62,6 @@ import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.indices.AliasFilterParsingException; -import org.elasticsearch.indices.InvalidAliasNameException; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -89,7 +79,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; @@ -476,7 +465,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust * used for rewriting since it does not know about the current {@link IndexReader}. */ public QueryShardContext newQueryShardContext() { - return newQueryShardContext(0, null, threadPool::estimatedTimeInMillis); + return newQueryShardContext(0, null, System::currentTimeMillis); } /** @@ -598,64 +587,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } } - /** - * Returns the filter associated with listed filtering aliases. - *
    - * The list of filtering aliases should be obtained by calling MetaData.filteringAliases.
    - * Returns null if no filtering is required.
    - */ - public Query aliasFilter(QueryShardContext context, String... aliasNames) { - if (aliasNames == null || aliasNames.length == 0) { - return null; - } - final ImmutableOpenMap aliases = indexSettings.getIndexMetaData().getAliases(); - if (aliasNames.length == 1) { - AliasMetaData alias = aliases.get(aliasNames[0]); - if (alias == null) { - // This shouldn't happen unless alias disappeared after filteringAliases was called. - throw new InvalidAliasNameException(index(), aliasNames[0], "Unknown alias name was passed to alias Filter"); - } - return parse(alias, context); - } else { - // we need to bench here a bit, to see maybe it makes sense to use OrFilter - BooleanQuery.Builder combined = new BooleanQuery.Builder(); - for (String aliasName : aliasNames) { - AliasMetaData alias = aliases.get(aliasName); - if (alias == null) { - // This shouldn't happen unless alias disappeared after filteringAliases was called. - throw new InvalidAliasNameException(indexSettings.getIndex(), aliasNames[0], - "Unknown alias name was passed to alias Filter"); - } - Query parsedFilter = parse(alias, context); - if (parsedFilter != null) { - combined.add(parsedFilter, BooleanClause.Occur.SHOULD); - } else { - // The filter might be null only if filter was removed after filteringAliases was called - return null; - } - } - return combined.build(); - } - } - - private Query parse(AliasMetaData alias, QueryShardContext shardContext) { - if (alias.filter() == null) { - return null; - } - try { - byte[] filterSource = alias.filter().uncompressed(); - try (XContentParser parser = XContentFactory.xContent(filterSource).createParser(filterSource)) { - Optional innerQueryBuilder = shardContext.newParseContext(parser).parseInnerQueryBuilder(); - if (innerQueryBuilder.isPresent()) { - return shardContext.toFilter(innerQueryBuilder.get()).query(); - } - return null; - } - } catch (IOException ex) { - throw new AliasFilterParsingException(shardContext.index(), alias.getAlias(), "Invalid alias filter", ex); - } - } - public IndexMetaData getMetaData() { return indexSettings.getIndexMetaData(); } diff --git a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java index e04d3dc7a49..3707d9259b1 100644 --- a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java +++ b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java @@ -54,7 +54,7 @@ public final class MergeSchedulerConfig { public static final Setting MAX_THREAD_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_thread_count", - (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), + (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.numberOfProcessors(s) / 2))), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), Property.Dynamic, Property.IndexScope); public static final Setting MAX_MERGE_COUNT_SETTING = diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java index d484c503c2b..18313f32745 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java @@ -68,7 +68,7 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData estimator.afterLoad(null, data.ramBytesUsed()); 
return data; } - return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0) == true) ? + return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0)) ? loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java index 99ca07b06bf..fc46a08ce1a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java @@ -297,7 +297,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement validPoint = true; } - if (coerce.value() == true && validPoint == false) { + if (coerce.value() && validPoint == false) { // by setting coerce to false we are assuming all geopoints are already in a valid coordinate system // thus this extra step can be skipped GeoUtils.normalizePoint(point, true, true); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 06928566424..1c54c2136c9 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -39,6 +39,7 @@ public abstract class Mapper implements ToXContent, Iterable { private final ContentPath contentPath; public BuilderContext(Settings indexSettings, ContentPath contentPath) { + Objects.requireNonNull(indexSettings, "indexSettings is required"); this.contentPath = contentPath; this.indexSettings = indexSettings; } @@ -47,16 +48,11 @@ public abstract class Mapper implements ToXContent, Iterable { return this.contentPath; } - @Nullable public Settings indexSettings() { return this.indexSettings; } - @Nullable public Version indexCreatedVersion() { - if (indexSettings == null) { - return null; - } return Version.indexCreated(indexSettings); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Uid.java b/core/src/main/java/org/elasticsearch/index/mapper/Uid.java index 69c49299e49..3f6ca1b7f5a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Uid.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Uid.java @@ -21,12 +21,10 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.common.lucene.BytesRefs; import java.util.Collection; import java.util.Collections; -import java.util.List; public final class Uid { diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 26b979e45fc..1cfe2acb246 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -299,7 +299,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder queries = new ArrayList<>(); Term[] terms = new Term[blendedFields.length]; float[] blendedBoost = new float[blendedFields.length]; @@ -249,7 +250,7 @@ public class MultiMatchQuery extends MatchQuery { for (FieldAndFieldType ft : blendedFields) { Query query; try { - query = ft.fieldType.termQuery(value, null); + query = ft.fieldType.termQuery(value, context); 
} catch (IllegalArgumentException e) { // the query expects a certain class of values such as numbers // of ip addresses and the value can't be parsed, so ignore this diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index 11023a6a135..fcab0cf3fc7 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -65,7 +65,7 @@ public final class ShadowIndexShard extends IndexShard { */ @Override public void updateRoutingEntry(ShardRouting newRouting) throws IOException { - if (newRouting.primary() == true) {// becoming a primary + if (newRouting.primary()) {// becoming a primary throw new IllegalStateException("can't promote shard to primary"); } super.updateRoutingEntry(newRouting); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index ff3713a374f..a08f9ca1ad4 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -47,7 +47,6 @@ import java.util.Collections; import java.util.Iterator; import java.util.Set; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; /** * The indices request cache allows to cache a shard level request stage responses, helping with improving @@ -90,7 +89,7 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo CacheBuilder cacheBuilder = CacheBuilder.builder() .setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this); if (expire != null) { - cacheBuilder.setExpireAfterAccess(TimeUnit.MILLISECONDS.toNanos(expire.millis())); + cacheBuilder.setExpireAfterAccess(expire); } cache = cacheBuilder.build(); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 202d303ce8b..4c7e541aafa 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -45,6 +45,7 @@ import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -65,6 +66,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; @@ -84,6 +86,7 @@ import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.merge.MergeStats; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.recovery.RecoveryStats; import 
org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; @@ -106,6 +109,7 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QueryPhase; @@ -128,6 +132,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -1207,4 +1212,12 @@ public class IndicesService extends AbstractLifecycleComponent (Index index, IndexSettings indexSettings) -> canDeleteIndexContents(index, indexSettings); private final IndexDeletionAllowedPredicate ALWAYS_TRUE = (Index index, IndexSettings indexSettings) -> true; + public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) { + Function factory = + (parser) -> new QueryParseContext(indicesQueriesRegistry, parser, new ParseFieldMatcher(settings)); + String[] aliases = indexNameExpressionResolver.filteringAliases(state, index, expressions); + IndexMetaData indexMetaData = state.metaData().index(index); + return new AliasFilter(ShardSearchRequest.parseAliasFilter(factory, indexMetaData, aliases), aliases); + } + } diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index e7146636534..6c701e59c90 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -19,7 +19,7 @@ package org.elasticsearch.ingest; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; @@ -68,7 +68,7 @@ public class PipelineExecutionService implements ClusterStateListener { }); } - public void executeBulkRequest(Iterable> actionRequests, + public void executeBulkRequest(Iterable actionRequests, BiConsumer itemFailureHandler, Consumer completionHandler) { threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() { @@ -80,7 +80,7 @@ public class PipelineExecutionService implements ClusterStateListener { @Override protected void doRun() throws Exception { - for (ActionRequest actionRequest : actionRequests) { + for (DocWriteRequest actionRequest : actionRequests) { if ((actionRequest instanceof IndexRequest)) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java index cb67eef852c..f37daddbb06 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java @@ -41,7 +41,7 @@ public class OsService extends AbstractComponent { super(settings); this.probe = OsProbe.getInstance(); TimeValue refreshInterval = 
REFRESH_INTERVAL_SETTING.get(settings); - this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.boundedNumberOfProcessors(settings)); + this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.numberOfProcessors(settings)); this.osStatsCache = new OsStatsCache(refreshInterval, probe.osStats()); logger.debug("using refresh_interval [{}]", refreshInterval); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java index a3f211ced35..82b10361153 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java @@ -82,7 +82,7 @@ public class RestIndexAction extends BaseRestHandler { indexRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards)); } if (sOpType != null) { - indexRequest.opType(IndexRequest.OpType.fromString(sOpType)); + indexRequest.opType(sOpType); } return channel -> diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 24cb816fb15..2d6e07d12ee 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -136,7 +136,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust TimeValue cacheExpire = SCRIPT_CACHE_EXPIRE_SETTING.get(settings); if (cacheExpire.getNanos() != 0) { - cacheBuilder.setExpireAfterAccess(cacheExpire.nanos()); + cacheBuilder.setExpireAfterAccess(cacheExpire); } logger.debug("using script cache with max_size [{}], expire [{}]", cacheMaxSize, cacheExpire); diff --git a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 006341dc046..a1c140639c1 100644 --- a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.ParsedQuery; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; @@ -74,6 +75,7 @@ import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -227,7 +229,12 @@ final class DefaultSearchContext extends SearchContext { } // initialize the filtering alias based on the provided filters - aliasFilter = indexService.aliasFilter(queryShardContext, request.filteringAliases()); + try { + final QueryBuilder queryBuilder = request.filteringAliases(); + aliasFilter = queryBuilder == null ? 
null : queryBuilder.toFilter(queryShardContext); + } catch (IOException e) { + throw new UncheckedIOException(e); + } if (query() == null) { parsedQuery(ParsedQuery.parsedMatchAllQuery()); diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 47949573dd3..c12d0ff5263 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -22,8 +22,10 @@ package org.elasticsearch.search; import com.carrotsearch.hppc.ObjectFloatHashMap; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -64,6 +66,7 @@ import org.elasticsearch.search.fetch.ShardFetchRequest; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalScrollSearchRequest; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; @@ -262,7 +265,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv // execution exception can happen while loading the cache, strip it if (e instanceof ExecutionException) { e = (e.getCause() == null || e.getCause() instanceof Exception) ? - (Exception) e.getCause() : new ElasticsearchException(e.getCause()); + (Exception) e.getCause() : new ElasticsearchException(e.getCause()); } operationListener.onFailedQueryPhase(context); logger.trace("Query phase failed", e); @@ -449,7 +452,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } operationListener.onFetchPhase(context, System.nanoTime() - time2); return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()), - context.shardTarget()); + context.shardTarget()); } catch (Exception e) { logger.trace("Fetch phase failed", e); processFailure(context, e); @@ -518,11 +521,6 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException { final DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, searcher); try { - // we clone the query shard context here just for rewriting otherwise we - // might end up with incorrect state since we are using now() or script services - // during rewrite and normalized / evaluate templates etc. 
- request.rewrite(new QueryShardContext(context.getQueryShardContext())); - assert context.getQueryShardContext().isCachable(); if (request.scroll() != null) { context.scrollContext(new ScrollContext()); context.scrollContext().scroll = request.scroll(); @@ -556,16 +554,30 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv return context; } - public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) { + public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) + throws IOException { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.getShard(request.shardId().getId()); SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId()); Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher; - return new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, - indexService, - indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, - timeout, fetchPhase); + final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, + engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, + timeout, fetchPhase); + boolean success = false; + try { + // we clone the query shard context here just for rewriting otherwise we + // might end up with incorrect state since we are using now() or script services + // during rewrite and normalized / evaluate templates etc. + request.rewrite(new QueryShardContext(searchContext.getQueryShardContext())); + assert searchContext.getQueryShardContext().isCachable(); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(searchContext); + } + } + return searchContext; } private void freeAllContextForIndex(Index index) { @@ -730,7 +742,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv if (source.scriptFields() != null) { for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) { SearchScript searchScript = scriptService.search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH, - Collections.emptyMap()); + Collections.emptyMap()); context.scriptFields().add(new ScriptField(field.fieldName(), searchScript, field.ignoreFailure())); } } @@ -853,10 +865,14 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } if ((time - lastAccessTime > context.keepAlive())) { logger.debug("freeing search context [{}], time [{}], lastAccessTime [{}], keepAlive [{}]", context.id(), time, - lastAccessTime, context.keepAlive()); + lastAccessTime, context.keepAlive()); freeContext(context.id()); } } } } + + public AliasFilter buildAliasFilter(ClusterState state, String index, String... 
expressions) { + return indicesService.buildAliasFilter(state, index, expressions); + } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java b/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java new file mode 100644 index 00000000000..9d22729b7a0 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java @@ -0,0 +1,121 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.internal; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Represents a {@link QueryBuilder} and a list of alias names that filters the builder is composed of. + */ +public final class AliasFilter implements Writeable { + public static final Version V_5_1_0 = Version.fromId(5010099); + private final String[] aliases; + private final QueryBuilder filter; + private final boolean reparseAliases; + + public AliasFilter(QueryBuilder filter, String... aliases) { + this.aliases = aliases == null ? 
Strings.EMPTY_ARRAY : aliases; + this.filter = filter; + reparseAliases = false; // no bwc here - we only do this if we parse the filter + } + + public AliasFilter(StreamInput input) throws IOException { + aliases = input.readStringArray(); + if (input.getVersion().onOrAfter(V_5_1_0)) { + filter = input.readOptionalNamedWriteable(QueryBuilder.class); + reparseAliases = false; + } else { + reparseAliases = true; // alright we read from 5.0 + filter = null; + } + } + + private QueryBuilder reparseFilter(QueryRewriteContext context) { + if (reparseAliases) { + // we are processing a filter received from a 5.0 node - we need to reparse this on the executing node + final IndexMetaData indexMetaData = context.getIndexSettings().getIndexMetaData(); + return ShardSearchRequest.parseAliasFilter(context::newParseContext, indexMetaData, aliases); + } + return filter; + } + + AliasFilter rewrite(QueryRewriteContext context) throws IOException { + QueryBuilder queryBuilder = reparseFilter(context); + if (queryBuilder != null) { + return new AliasFilter(QueryBuilder.rewriteQuery(queryBuilder, context), aliases); + } + return new AliasFilter(filter, aliases); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(aliases); + if (out.getVersion().onOrAfter(V_5_1_0)) { + out.writeOptionalNamedWriteable(filter); + } + } + + /** + * Returns the aliases patters that are used to compose the {@link QueryBuilder} + * returned from {@link #getQueryBuilder()} + */ + public String[] getAliases() { + return aliases; + } + + /** + * Returns the alias filter {@link QueryBuilder} or null if there is no such filter + */ + public QueryBuilder getQueryBuilder() { + if (reparseAliases) { + // this is only for BWC since 5.0 still only sends aliases so this must be rewritten on the executing node + // if we talk to an older node we also only forward/write the string array which is compatible with the consumers + // in 5.0 see ExplainRequest and QueryValidationRequest + throw new IllegalStateException("alias filter for aliases: " + Arrays.toString(aliases) + " must be rewritten first"); + } + return filter; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AliasFilter that = (AliasFilter) o; + return reparseAliases == that.reparseAliases && + Arrays.equals(aliases, that.aliases) && + Objects.equals(filter, that.filter); + } + + @Override + public int hashCode() { + return Objects.hash(aliases, filter, reparseAliases); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 0d6148011ed..0fe10fa71cd 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; @@ -62,7 +63,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { private SearchType searchType; private Scroll scroll; 
private String[] types = Strings.EMPTY_ARRAY; - private String[] filteringAliases; + private AliasFilter aliasFilter; private SearchSourceBuilder source; private Boolean requestCache; private long nowInMillis; @@ -73,29 +74,29 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } ShardSearchLocalRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards, - String[] filteringAliases, long nowInMillis) { + AliasFilter aliasFilter, long nowInMillis) { this(shardRouting.shardId(), numberOfShards, searchRequest.searchType(), - searchRequest.source(), searchRequest.types(), searchRequest.requestCache()); + searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter); this.scroll = searchRequest.scroll(); - this.filteringAliases = filteringAliases; this.nowInMillis = nowInMillis; } - public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, String[] filteringAliases) { + public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, AliasFilter aliasFilter) { this.types = types; this.nowInMillis = nowInMillis; - this.filteringAliases = filteringAliases; + this.aliasFilter = aliasFilter; this.shardId = shardId; } public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, - Boolean requestCache) { + Boolean requestCache, AliasFilter aliasFilter) { this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; this.source = source; this.types = types; this.requestCache = requestCache; + this.aliasFilter = aliasFilter; } @@ -130,8 +131,8 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } @Override - public String[] filteringAliases() { - return filteringAliases; + public QueryBuilder filteringAliases() { + return aliasFilter.getQueryBuilder(); } @Override @@ -166,7 +167,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { scroll = in.readOptionalWriteable(Scroll::new); source = in.readOptionalWriteable(SearchSourceBuilder::new); types = in.readStringArray(); - filteringAliases = in.readStringArray(); + aliasFilter = new AliasFilter(in); nowInMillis = in.readVLong(); requestCache = in.readOptionalBoolean(); } @@ -180,7 +181,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { out.writeOptionalWriteable(scroll); out.writeOptionalWriteable(source); out.writeStringArray(types); - out.writeStringArrayNullable(filteringAliases); + aliasFilter.writeTo(out); if (!asKey) { out.writeVLong(nowInMillis); } @@ -200,6 +201,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { public void rewrite(QueryShardContext context) throws IOException { SearchSourceBuilder source = this.source; SearchSourceBuilder rewritten = null; + aliasFilter = aliasFilter.rewrite(context); while (rewritten != source) { rewritten = source.rewrite(context); source = rewritten; diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 6c237322f04..01852506cdc 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -20,13 +20,26 @@ package org.elasticsearch.search.internal; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import 
org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.AliasFilterParsingException; +import org.elasticsearch.indices.InvalidAliasNameException; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; +import java.util.Optional; +import java.util.function.Function; /** * Shard level request that represents a search. @@ -47,7 +60,7 @@ public interface ShardSearchRequest { SearchType searchType(); - String[] filteringAliases(); + QueryBuilder filteringAliases(); long nowInMillis(); @@ -76,4 +89,64 @@ public interface ShardSearchRequest { * QueryBuilder. */ void rewrite(QueryShardContext context) throws IOException; + + /** + * Returns the filter associated with listed filtering aliases. + *
    + * The list of filtering aliases should be obtained by calling MetaData.filteringAliases.
    + * Returns null if no filtering is required.
    + */ + static QueryBuilder parseAliasFilter(Function contextFactory, + IndexMetaData metaData, String... aliasNames) { + if (aliasNames == null || aliasNames.length == 0) { + return null; + } + Index index = metaData.getIndex(); + ImmutableOpenMap aliases = metaData.getAliases(); + Function parserFunction = (alias) -> { + if (alias.filter() == null) { + return null; + } + try { + byte[] filterSource = alias.filter().uncompressed(); + try (XContentParser parser = XContentFactory.xContent(filterSource).createParser(filterSource)) { + Optional innerQueryBuilder = contextFactory.apply(parser).parseInnerQueryBuilder(); + if (innerQueryBuilder.isPresent()) { + return innerQueryBuilder.get(); + } + return null; + } + } catch (IOException ex) { + throw new AliasFilterParsingException(index, alias.getAlias(), "Invalid alias filter", ex); + } + }; + if (aliasNames.length == 1) { + AliasMetaData alias = aliases.get(aliasNames[0]); + if (alias == null) { + // This shouldn't happen unless alias disappeared after filteringAliases was called. + throw new InvalidAliasNameException(index, aliasNames[0], "Unknown alias name was passed to alias Filter"); + } + return parserFunction.apply(alias); + } else { + // we need to bench here a bit, to see maybe it makes sense to use OrFilter + BoolQueryBuilder combined = new BoolQueryBuilder(); + for (String aliasName : aliasNames) { + AliasMetaData alias = aliases.get(aliasName); + if (alias == null) { + // This shouldn't happen unless alias disappeared after filteringAliases was called. + throw new InvalidAliasNameException(index, aliasNames[0], + "Unknown alias name was passed to alias Filter"); + } + QueryBuilder parsedFilter = parserFunction.apply(alias); + if (parsedFilter != null) { + combined.should(parsedFilter); + } else { + // The filter might be null only if filter was removed after filteringAliases was called + return null; + } + } + return combined; + } + } + } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 93013b94b36..1a92257dc34 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; @@ -51,8 +52,8 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha } public ShardSearchTransportRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards, - String[] filteringAliases, long nowInMillis) { - this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardRouting, numberOfShards, filteringAliases, nowInMillis); + AliasFilter aliasFilter, long nowInMillis) { + this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardRouting, numberOfShards, aliasFilter, nowInMillis); this.originalIndices = new OriginalIndices(searchRequest); } @@ -104,7 +105,7 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha } @Override - public String[] 
filteringAliases() { + public QueryBuilder filteringAliases() { return shardSearchLocalRequest.filteringAliases(); } diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 6c3d8b0a537..c3511ef0ae1 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -452,7 +452,7 @@ public class GeoDistanceSortBuilder extends SortBuilder geoDistance = GeoDistance.fromString(parser.text()); } else if (parseFieldMatcher.match(currentName, COERCE_FIELD)) { coerce = parser.booleanValue(); - if (coerce == true) { + if (coerce) { ignoreMalformed = true; } } else if (parseFieldMatcher.match(currentName, IGNORE_MALFORMED_FIELD)) { diff --git a/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java b/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java index de7dbbaefc9..9e5469fd16a 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java +++ b/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.SizeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.node.Node; @@ -79,7 +78,7 @@ public final class FixedExecutorBuilder extends ExecutorBuilder builders = new HashMap<>(); - final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); + final int availableProcessors = EsExecutors.numberOfProcessors(settings); final int halfProcMaxAt5 = halfNumberOfProcessorsMaxFive(availableProcessors); final int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors); final int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512); diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 862cccab318..8ba5dc4868f 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -19,9 +19,11 @@ package org.elasticsearch; +import org.elasticsearch.action.ShardValidateQueryRequestTests; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.hamcrest.Matchers; @@ -279,4 +281,19 @@ public class VersionTests extends ESTestCase { } } } + private static final Version V_20_0_0_UNRELEASED = new Version(20000099, Version.CURRENT.luceneVersion); + + // see comment in Version.java about this test + public void testUnknownVersions() { + assertUnknownVersion(V_20_0_0_UNRELEASED); + expectThrows(AssertionError.class, () -> assertUnknownVersion(Version.CURRENT)); + assertUnknownVersion(AliasFilter.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant + // once we released 5.0.0 and it's added to Version.java we need to remove this constant + assertUnknownVersion(ShardValidateQueryRequestTests.V_5_0_0); + } + + public static 
void assertUnknownVersion(Version version) { + assertFalse("Version " + version + " has been releaed don't use a new instance of this version", + VersionUtils.allVersions().contains(version)); + } } diff --git a/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java new file mode 100644 index 00000000000..ad2cabefdbc --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action; + +import org.elasticsearch.action.explain.ExplainRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchRequestParsers; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; + +public class ExplainRequestTests extends ESTestCase { + + protected NamedWriteableRegistry namedWriteableRegistry; + protected SearchRequestParsers searchRequestParsers; + public void setUp() throws Exception { + super.setUp(); + IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + List entries = new ArrayList<>(); + entries.addAll(indicesModule.getNamedWriteables()); + entries.addAll(searchModule.getNamedWriteables()); + namedWriteableRegistry = new NamedWriteableRegistry(entries); + searchRequestParsers = searchModule.getSearchRequestParsers(); + } + + + public void testSerialize() throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + ExplainRequest request = new ExplainRequest("index", "type", "id"); + request.fetchSourceContext(new FetchSourceContext(true, new String[]{"field1.*"}, new String[] {"field2.*"})); + request.filteringAlias(new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"})); + request.preference("the_preference"); + request.query(QueryBuilders.termQuery("field", "value")); + request.storedFields(new String[] {"field1", "field2"}); + 
request.routing("some_routing"); + request.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + ExplainRequest readRequest = new ExplainRequest(); + readRequest.readFrom(in); + assertEquals(request.filteringAlias(), readRequest.filteringAlias()); + assertArrayEquals(request.storedFields(), readRequest.storedFields()); + assertEquals(request.preference(), readRequest.preference()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.routing(), readRequest.routing()); + assertEquals(request.fetchSourceContext(), readRequest.fetchSourceContext()); + } + } + } + + // BWC test for changes from #20916 + public void testSerialize50Request() throws IOException { + ExplainRequest request = new ExplainRequest("index", "type", "id"); + request.fetchSourceContext(new FetchSourceContext(true, new String[]{"field1.*"}, new String[] {"field2.*"})); + request.filteringAlias(new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"})); + request.preference("the_preference"); + request.query(QueryBuilders.termQuery("field", "value")); + request.storedFields(new String[] {"field1", "field2"}); + request.routing("some_routing"); + BytesArray requestBytes = new BytesArray(Base64.getDecoder() + // this is a base64 encoded request generated with the same input + .decode("AAABBWluZGV4BHR5cGUCaWQBDHNvbWVfcm91dGluZwEOdGhlX3ByZWZlcmVuY2UEdGVybT" + + "+AAAAABWZpZWxkFQV2YWx1ZQIGYWxpYXMwBmFsaWFzMQECBmZpZWxkMQZmaWVsZDIBAQEIZmllbGQxLioBCGZpZWxkMi4qAA")); + try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { + in.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + ExplainRequest readRequest = new ExplainRequest(); + readRequest.readFrom(in); + assertEquals(0, in.available()); + assertArrayEquals(request.filteringAlias().getAliases(), readRequest.filteringAlias().getAliases()); + expectThrows(IllegalStateException.class, () -> readRequest.filteringAlias().getQueryBuilder()); + assertArrayEquals(request.storedFields(), readRequest.storedFields()); + assertEquals(request.preference(), readRequest.preference()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.routing(), readRequest.routing()); + assertEquals(request.fetchSourceContext(), readRequest.fetchSourceContext()); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + readRequest.writeTo(output); + assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java new file mode 100644 index 00000000000..34c6999f4e8 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.validate.query.ShardValidateQueryRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchRequestParsers; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; + +public class ShardValidateQueryRequestTests extends ESTestCase { + public static final Version V_5_0_0 = Version.fromId(5000099); + + protected NamedWriteableRegistry namedWriteableRegistry; + protected SearchRequestParsers searchRequestParsers; + public void setUp() throws Exception { + super.setUp(); + IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + List entries = new ArrayList<>(); + entries.addAll(indicesModule.getNamedWriteables()); + entries.addAll(searchModule.getNamedWriteables()); + namedWriteableRegistry = new NamedWriteableRegistry(entries); + searchRequestParsers = searchModule.getSearchRequestParsers(); + } + + + public void testSerialize() throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices"); + validateQueryRequest.query(QueryBuilders.termQuery("field", "value")); + validateQueryRequest.rewrite(true); + validateQueryRequest.explain(false); + validateQueryRequest.types("type1", "type2"); + ShardValidateQueryRequest request = new ShardValidateQueryRequest(new ShardId("index", "foobar", 1), + new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest); + request.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); + readRequest.readFrom(in); + assertEquals(request.filteringAliases(), readRequest.filteringAliases()); + assertArrayEquals(request.types(), readRequest.types()); + assertEquals(request.explain(), readRequest.explain()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.rewrite(), readRequest.rewrite()); + assertEquals(request.shardId(), readRequest.shardId()); + } + } + } + + // BWC test for 
changes from #20916 + public void testSerialize50Request() throws IOException { + ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices"); + validateQueryRequest.query(QueryBuilders.termQuery("field", "value")); + validateQueryRequest.rewrite(true); + validateQueryRequest.explain(false); + validateQueryRequest.types("type1", "type2"); + ShardValidateQueryRequest request = new ShardValidateQueryRequest(new ShardId("index", "foobar", 1), + new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest); + BytesArray requestBytes = new BytesArray(Base64.getDecoder() + // this is a base64 encoded request generated with the same input + .decode("AAVpbmRleAZmb29iYXIBAQdpbmRpY2VzBAR0ZXJtP4AAAAAFZmllbGQVBXZhbHVlAgV0eXBlMQV0eXBlMgIGYWxpYXMwBmFsaWFzMQABAA")); + try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { + in.setVersion(V_5_0_0); + ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); + readRequest.readFrom(in); + assertEquals(0, in.available()); + assertArrayEquals(request.filteringAliases().getAliases(), readRequest.filteringAliases().getAliases()); + expectThrows(IllegalStateException.class, () -> readRequest.filteringAliases().getQueryBuilder()); + assertArrayEquals(request.types(), readRequest.types()); + assertEquals(request.explain(), readRequest.explain()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.rewrite(), readRequest.rewrite()); + assertEquals(request.shardId(), readRequest.shardId()); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(V_5_0_0); + readRequest.writeTo(output); + assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index a74a3879bef..57aa0cbb9a4 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -20,8 +20,8 @@ package org.elasticsearch.action.bulk; import org.apache.lucene.util.Constants; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -113,7 +113,7 @@ public class BulkRequestTests extends ESTestCase { public void testBulkAddIterable() { BulkRequest bulkRequest = Requests.bulkRequest(); - List<ActionRequest<?>> requests = new ArrayList<>(); + List<DocWriteRequest> requests = new ArrayList<>(); requests.add(new IndexRequest("test", "test", "id").source("field", "value")); requests.add(new UpdateRequest("test", "test", "id").doc("field", "value")); requests.add(new DeleteRequest("test", "test", "id")); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index c9fa93f76db..e2d3e87c210 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -47,6 +47,7 @@ import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.function.Function; +import static
org.elasticsearch.action.DocWriteRequest.OpType; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -309,7 +310,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getVersion(), equalTo(1L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(1L)); assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(1)); @@ -347,7 +348,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getVersion(), equalTo(2L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(2L)); assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(2)); @@ -371,7 +372,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getVersion(), equalTo(3L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); } } @@ -388,7 +389,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); } builder = client().prepareBulk(); @@ -404,7 +405,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); for (int j = 0; j < 5; j++) { GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).get(); assertThat(getResponse.isExists(), equalTo(false)); @@ -747,12 +748,12 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertNoFailures(indexBulkItemResponse); assertThat(bulkItemResponse.getItems().length, is(6)); - assertThat(bulkItemResponse.getItems()[0].getOpType(), is("index")); - assertThat(bulkItemResponse.getItems()[1].getOpType(), is("index")); - assertThat(bulkItemResponse.getItems()[2].getOpType(), is("update")); - 
assertThat(bulkItemResponse.getItems()[3].getOpType(), is("update")); - assertThat(bulkItemResponse.getItems()[4].getOpType(), is("delete")); - assertThat(bulkItemResponse.getItems()[5].getOpType(), is("delete")); + assertThat(bulkItemResponse.getItems()[0].getOpType(), is(OpType.INDEX)); + assertThat(bulkItemResponse.getItems()[1].getOpType(), is(OpType.INDEX)); + assertThat(bulkItemResponse.getItems()[2].getOpType(), is(OpType.UPDATE)); + assertThat(bulkItemResponse.getItems()[3].getOpType(), is(OpType.UPDATE)); + assertThat(bulkItemResponse.getItems()[4].getOpType(), is(OpType.DELETE)); + assertThat(bulkItemResponse.getItems()[5].getOpType(), is(OpType.DELETE)); } private static String indexOrAlias() { @@ -797,9 +798,9 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(bulkResponse.hasFailures(), is(true)); BulkItemResponse[] responseItems = bulkResponse.getItems(); assertThat(responseItems.length, is(3)); - assertThat(responseItems[0].getOpType(), is("index")); - assertThat(responseItems[1].getOpType(), is("update")); - assertThat(responseItems[2].getOpType(), is("delete")); + assertThat(responseItems[0].getOpType(), is(OpType.INDEX)); + assertThat(responseItems[1].getOpType(), is(OpType.UPDATE)); + assertThat(responseItems[2].getOpType(), is(OpType.DELETE)); } // issue 9821 @@ -809,9 +810,9 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { .add(client().prepareUpdate().setIndex("INVALID.NAME").setType("type1").setId("1").setDoc("field", randomInt())) .add(client().prepareDelete().setIndex("INVALID.NAME").setType("type1").setId("1")).get(); assertThat(bulkResponse.getItems().length, is(3)); - assertThat(bulkResponse.getItems()[0].getOpType(), is("index")); - assertThat(bulkResponse.getItems()[1].getOpType(), is("update")); - assertThat(bulkResponse.getItems()[2].getOpType(), is("delete")); + assertThat(bulkResponse.getItems()[0].getOpType(), is(OpType.INDEX)); + assertThat(bulkResponse.getItems()[1].getOpType(), is(OpType.UPDATE)); + assertThat(bulkResponse.getItems()[2].getOpType(), is(OpType.DELETE)); } } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 4fa640b3adc..c0e735ec33c 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; @@ -212,11 +213,11 @@ public class RetryTests extends ESTestCase { } private BulkItemResponse successfulResponse() { - return new BulkItemResponse(1, "update", new DeleteResponse()); + return new BulkItemResponse(1, OpType.DELETE, new DeleteResponse()); } private BulkItemResponse failedResponse() { - return new BulkItemResponse(1, "update", new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full"))); + return new BulkItemResponse(1, OpType.INDEX, new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full"))); } } } diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index 
1b37a73d6f1..93bdd2dfd7b 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.unit.TimeValue; @@ -44,18 +45,24 @@ public class IndexRequestTests extends ESTestCase { String createUpper = "CREATE"; String indexUpper = "INDEX"; - assertThat(IndexRequest.OpType.fromString(create), equalTo(IndexRequest.OpType.CREATE)); - assertThat(IndexRequest.OpType.fromString(index), equalTo(IndexRequest.OpType.INDEX)); - assertThat(IndexRequest.OpType.fromString(createUpper), equalTo(IndexRequest.OpType.CREATE)); - assertThat(IndexRequest.OpType.fromString(indexUpper), equalTo(IndexRequest.OpType.INDEX)); + IndexRequest indexRequest = new IndexRequest(""); + indexRequest.opType(create); + assertThat(indexRequest.opType(), equalTo(DocWriteRequest.OpType.CREATE)); + indexRequest.opType(createUpper); + assertThat(indexRequest.opType(), equalTo(DocWriteRequest.OpType.CREATE)); + indexRequest.opType(index); + assertThat(indexRequest.opType(), equalTo(DocWriteRequest.OpType.INDEX)); + indexRequest.opType(indexUpper); + assertThat(indexRequest.opType(), equalTo(DocWriteRequest.OpType.INDEX)); } public void testReadBogusString() { try { - IndexRequest.OpType.fromString("foobar"); + IndexRequest indexRequest = new IndexRequest(""); + indexRequest.opType("foobar"); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("opType [foobar] not allowed")); + assertThat(e.getMessage(), equalTo("opType must be 'create' or 'index', found: [foobar]")); } } diff --git a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java index 9ee5036131d..7bd4f7fb7a6 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; */ import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -116,10 +116,10 @@ public class BulkRequestModifierTests extends ESTestCase { }); List<BulkItemResponse> originalResponses = new ArrayList<>(); - for (ActionRequest actionRequest : bulkRequest.requests()) { + for (DocWriteRequest actionRequest : bulkRequest.requests()) { IndexRequest indexRequest = (IndexRequest) actionRequest; IndexResponse indexResponse = new IndexResponse(new ShardId("index", "_na_", 0), indexRequest.type(), indexRequest.id(), 1, true); - originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType().lowercase(), indexResponse)); + originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType(), indexResponse)); } bulkResponseListener.onResponse(new BulkResponse(originalResponses.toArray(new BulkItemResponse[originalResponses.size()]), 0));
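The hunks above and below all make the same API shift: bulk item op types move from bare strings ("index", "create", "update", "delete") to the DocWriteRequest.OpType enum this change introduces. As a minimal sketch of what response handling looks like against the new signature — the handleBulk method and its comments are illustrative only, not part of this patch, and assume nothing beyond the BulkResponse/BulkItemResponse accessors exercised in the surrounding tests:

    import org.elasticsearch.action.DocWriteRequest.OpType;
    import org.elasticsearch.action.bulk.BulkItemResponse;
    import org.elasticsearch.action.bulk.BulkResponse;

    class BulkOpTypeExample {
        // With the enum, op-type dispatch is a switch the compiler can check
        // for exhaustiveness, instead of equality tests against magic strings.
        static void handleBulk(BulkResponse response) {
            for (BulkItemResponse item : response.getItems()) {
                switch (item.getOpType()) {
                    case INDEX:
                    case CREATE:
                        // a document (version) was written
                        break;
                    case UPDATE:
                        // an existing document was modified
                        break;
                    case DELETE:
                        // a document was removed
                        break;
                }
            }
        }
    }

This is also why the RetryTests hunk above can pair OpType.DELETE with a DeleteResponse instead of reusing the string "update": the enum makes a mismatched op type visible at the call site.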
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java index 1316c87e2aa..9dbef147c01 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -174,7 +174,7 @@ public class IngestActionFilterTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); for (int i = 0; i < numRequest; i++) { if (rarely()) { - ActionRequest request; + DocWriteRequest request; if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); } else { @@ -196,7 +196,7 @@ public class IngestActionFilterTests extends ESTestCase { verifyZeroInteractions(actionListener); int assertedRequests = 0; - for (ActionRequest actionRequest : bulkRequest.requests()) { + for (DocWriteRequest actionRequest : bulkRequest.requests()) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; assertThat(indexRequest.sourceAsMap().size(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java new file mode 100644 index 00000000000..1aafa1d343b --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.search; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.PlainShardIterator; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.internal.ShardSearchTransportRequest; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +public class SearchAsyncActionTests extends ESTestCase { + + public void testFanOutAndCollect() throws InterruptedException { + SearchRequest request = new SearchRequest(); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<TestSearchResponse> response = new AtomicReference<>(); + ActionListener<SearchResponse> responseListener = new ActionListener<SearchResponse>() { + @Override + public void onResponse(SearchResponse searchResponse) { + response.set((TestSearchResponse) searchResponse); + } + + @Override + public void onFailure(Exception e) { + logger.warn("test failed", e); + fail(e.getMessage()); + } + }; + DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + + Map<DiscoveryNode, Set<Long>> nodeToContextMap = new HashMap<>(); + AtomicInteger contextIdGenerator = new AtomicInteger(0); + GroupShardsIterator shardsIter = getShardsIter("idx", randomIntBetween(1, 10), randomBoolean(), primaryNode, replicaNode); + AtomicInteger numFreedContext = new AtomicInteger(); + SearchTransportService transportService = new SearchTransportService(Settings.EMPTY, null) { + @Override + public void sendFreeContext(DiscoveryNode node, long contextId, SearchRequest request) { + numFreedContext.incrementAndGet(); + assertTrue(nodeToContextMap.containsKey(node)); + assertTrue(nodeToContextMap.get(node).remove(contextId)); + } + }; + Map<String, DiscoveryNode> lookup = new HashMap<>(); + lookup.put(primaryNode.getId(), primaryNode); + AbstractSearchAsyncAction<TestSearchPhaseResult> asyncAction = new AbstractSearchAsyncAction<TestSearchPhaseResult>(logger, transportService, lookup::get, + Collections.emptyMap(), null, request, responseListener, shardsIter, 0, 0) { + TestSearchResponse response = new TestSearchResponse(); + + @Override + protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<TestSearchPhaseResult> listener) { + assertTrue("shard: " + request.shardId() + " has been queried twice", response.queried.add(request.shardId())); + TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult(contextIdGenerator.incrementAndGet(), node); + Set<Long> ids =
nodeToContextMap.computeIfAbsent(node, (n) -> new HashSet<>()); + ids.add(testSearchPhaseResult.id); + if (randomBoolean()) { + listener.onResponse(testSearchPhaseResult); + } else { + new Thread(() -> listener.onResponse(testSearchPhaseResult)).start(); + } + } + + @Override + protected void moveToSecondPhase() throws Exception { + for (int i = 0; i < firstResults.length(); i++) { + TestSearchPhaseResult result = firstResults.get(i); + assertEquals(result.node.getId(), result.shardTarget().getNodeId()); + sendReleaseSearchContext(result.id(), result.node); + } + responseListener.onResponse(response); + latch.countDown(); + } + + @Override + protected String firstPhaseName() { + return "test"; + } + + @Override + protected Executor getExecutor() { + fail("no executor in this class"); + return null; + } + }; + asyncAction.start(); + latch.await(); + assertNotNull(response.get()); + assertFalse(nodeToContextMap.isEmpty()); + assertTrue(nodeToContextMap.containsKey(primaryNode)); + assertEquals(shardsIter.size(), numFreedContext.get()); + assertTrue(nodeToContextMap.get(primaryNode).toString(), nodeToContextMap.get(primaryNode).isEmpty()); + } + + private GroupShardsIterator getShardsIter(String index, int numShards, boolean doReplicas, DiscoveryNode primaryNode, + DiscoveryNode replicaNode) { + ArrayList<ShardIterator> list = new ArrayList<>(); + for (int i = 0; i < numShards; i++) { + ArrayList<ShardRouting> started = new ArrayList<>(); + ArrayList<ShardRouting> initializing = new ArrayList<>(); + ArrayList<ShardRouting> unassigned = new ArrayList<>(); + + ShardRouting routing = ShardRouting.newUnassigned(new ShardId(new Index(index, "_na_"), i), true, + RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar")); + routing = routing.initialize(primaryNode.getId(), i + "p", 0); + routing.started(); + started.add(routing); + if (doReplicas) { + routing = ShardRouting.newUnassigned(new ShardId(new Index(index, "_na_"), i), false, + RecoverySource.PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar")); + if (replicaNode != null) { + routing = routing.initialize(replicaNode.getId(), i + "r", 0); + if (randomBoolean()) { + routing.started(); + started.add(routing); + } else { + initializing.add(routing); + } + } else { + unassigned.add(routing); // unused yet + } + } + Collections.shuffle(started, random()); + started.addAll(initializing); + list.add(new PlainShardIterator(new ShardId(new Index(index, "_na_"), i), started)); + } + return new GroupShardsIterator(list); + } + + public static class TestSearchResponse extends SearchResponse { + public final Set<ShardId> queried = new HashSet<>(); + } + + public static class TestSearchPhaseResult implements SearchPhaseResult { + final long id; + final DiscoveryNode node; + SearchShardTarget shardTarget; + + public TestSearchPhaseResult(long id, DiscoveryNode node) { + this.id = id; + this.node = node; + } + + @Override + public long id() { + return id; + } + + @Override + public SearchShardTarget shardTarget() { + return this.shardTarget; + } + + @Override + public void shardTarget(SearchShardTarget shardTarget) { + this.shardTarget = shardTarget; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + } +} diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheBuilderTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheBuilderTests.java new file mode 100644 index
00000000000..e0a5786e184 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/cache/CacheBuilderTests.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.cache; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; + +public class CacheBuilderTests extends ESTestCase { + + public void testSettingExpireAfterAccess() { + IllegalArgumentException iae = + expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterAccess(TimeValue.MINUS_ONE)); + assertThat(iae.getMessage(), containsString("expireAfterAccess <=")); + iae = expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterAccess(TimeValue.ZERO)); + assertThat(iae.getMessage(), containsString("expireAfterAccess <=")); + final TimeValue timeValue = TimeValue.parseTimeValue(randomPositiveTimeValue(), ""); + Cache<Object, Object> cache = CacheBuilder.builder().setExpireAfterAccess(timeValue).build(); + assertEquals(timeValue.getNanos(), cache.getExpireAfterAccessNanos()); + } + + public void testSettingExpireAfterWrite() { + IllegalArgumentException iae = + expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterWrite(TimeValue.MINUS_ONE)); + assertThat(iae.getMessage(), containsString("expireAfterWrite <=")); + iae = expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterWrite(TimeValue.ZERO)); + assertThat(iae.getMessage(), containsString("expireAfterWrite <=")); + final TimeValue timeValue = TimeValue.parseTimeValue(randomPositiveTimeValue(), ""); + Cache<Object, Object> cache = CacheBuilder.builder().setExpireAfterWrite(timeValue).build(); + assertEquals(timeValue.getNanos(), cache.getExpireAfterWriteNanos()); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java index 3b88a3bdcfe..d8dbaa673a0 100644 --- a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java +++ b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java @@ -228,7 +228,7 @@ public class CacheTests extends ESTestCase { return now.get(); } }; - cache.setExpireAfterAccess(1); + cache.setExpireAfterAccessNanos(1); List<Integer> evictedKeys = new ArrayList<>(); cache.setRemovalListener(notification -> { assertEquals(RemovalNotification.RemovalReason.EVICTED, notification.getRemovalReason()); @@ -265,7 +265,7 @@ public class CacheTests extends ESTestCase { return now.get(); } }; - cache.setExpireAfterWrite(1); + cache.setExpireAfterWriteNanos(1); List<Integer> evictedKeys = new ArrayList<>(); cache.setRemovalListener(notification -> {
assertEquals(RemovalNotification.RemovalReason.EVICTED, notification.getRemovalReason()); @@ -307,7 +307,7 @@ public class CacheTests extends ESTestCase { return now.get(); } }; - cache.setExpireAfterAccess(1); + cache.setExpireAfterAccessNanos(1); now.set(0); for (int i = 0; i < numberOfEntries; i++) { cache.put(i, Integer.toString(i)); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index 2cc4889be9d..733d3d1775d 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -113,6 +115,82 @@ public class ObjectParserTests extends ESTestCase { } + /** + * This test ensures we can use a classic pull-parsing parser + * together with the object parser + */ + public void testUseClassicPullParsingSubParser() throws IOException { + class ClassicParser { + URI parseURI(XContentParser parser) throws IOException { + String fieldName = null; + String host = ""; + int port = 0; + XContentParser.Token token; + while ((token = parser.currentToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + if (fieldName.equals("host")) { + host = parser.text(); + } else { + throw new IllegalStateException("boom"); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + if (fieldName.equals("port")) { + port = parser.intValue(); + } else { + throw new IllegalStateException("boom"); + } + } + parser.nextToken(); + } + return URI.create(host + ":" + port); + } + } + class Foo { + public String name; + public URI uri; + public void setName(String name) { + this.name = name; + } + + public void setURI(URI uri) { + this.uri = uri; + } + } + + class CustomParseFieldMatchSupplier implements ParseFieldMatcherSupplier { + + public final ClassicParser parser; + + CustomParseFieldMatchSupplier(ClassicParser parser) { + this.parser = parser; + } + + @Override + public ParseFieldMatcher getParseFieldMatcher() { + return ParseFieldMatcher.EMPTY; + } + + public URI parseURI(XContentParser parser) { + try { + return this.parser.parseURI(parser); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + XContentParser parser = XContentType.JSON.xContent() + .createParser("{\"url\" : { \"host\": \"http://foobar\", \"port\" : 80}, \"name\" : \"foobarbaz\"}"); + ObjectParser<Foo, CustomParseFieldMatchSupplier> objectParser = new ObjectParser<>("foo"); + objectParser.declareString(Foo::setName, new ParseField("name")); + objectParser.declareObjectOrDefault(Foo::setURI, (p, s) -> s.parseURI(p), () -> null, new ParseField("url")); + Foo s = objectParser.parse(parser, new Foo(), new CustomParseFieldMatchSupplier(new ClassicParser())); + assertEquals(s.uri.getHost(), "foobar"); + assertEquals(s.uri.getPort(), 80); + assertEquals(s.name, "foobarbaz"); + } + public void testExceptions() throws IOException { XContentParser parser = XContentType.JSON.xContent().createParser("{\"test\" : \"foo\"}"); class TestStruct { diff --git a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java
b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java index 91c2655b3c2..e3556c8cc7c 100644 --- a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; +import static org.elasticsearch.action.DocWriteRequest.OpType; import static org.elasticsearch.client.Requests.clearIndicesCacheRequest; import static org.elasticsearch.client.Requests.getRequest; import static org.elasticsearch.client.Requests.indexRequest; @@ -191,31 +192,31 @@ public class DocumentActionsIT extends ESIntegTestCase { assertThat(bulkResponse.getItems().length, equalTo(5)); assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[0].getOpType(), equalTo("index")); + assertThat(bulkResponse.getItems()[0].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[0].getIndex(), equalTo(getConcreteIndexName())); assertThat(bulkResponse.getItems()[0].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[0].getId(), equalTo("1")); assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[1].getOpType(), equalTo("create")); + assertThat(bulkResponse.getItems()[1].getOpType(), equalTo(OpType.CREATE)); assertThat(bulkResponse.getItems()[1].getIndex(), equalTo(getConcreteIndexName())); assertThat(bulkResponse.getItems()[1].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[1].getId(), equalTo("2")); assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[2].getOpType(), equalTo("index")); + assertThat(bulkResponse.getItems()[2].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[2].getIndex(), equalTo(getConcreteIndexName())); assertThat(bulkResponse.getItems()[2].getType(), equalTo("type1")); String generatedId3 = bulkResponse.getItems()[2].getId(); assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[3].getOpType(), equalTo("delete")); + assertThat(bulkResponse.getItems()[3].getOpType(), equalTo(OpType.DELETE)); assertThat(bulkResponse.getItems()[3].getIndex(), equalTo(getConcreteIndexName())); assertThat(bulkResponse.getItems()[3].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[3].getId(), equalTo("1")); assertThat(bulkResponse.getItems()[4].isFailed(), equalTo(true)); - assertThat(bulkResponse.getItems()[4].getOpType(), equalTo("index")); + assertThat(bulkResponse.getItems()[4].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[4].getIndex(), equalTo(getConcreteIndexName())); assertThat(bulkResponse.getItems()[4].getType(), equalTo("type1")); diff --git a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java index afde263d73d..7ba78afb8c6 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -77,82 +77,6 @@ public class IndexServiceTests extends ESSingleNodeTestCase { return new CompressedXContent(builder.string()); } - public void testFilteringAliases() throws Exception { - IndexService indexService = createIndex("test", Settings.EMPTY); - add(indexService, "cats", filter(termQuery("animal", "cat"))); - add(indexService, "dogs", filter(termQuery("animal", 
"dog"))); - add(indexService, "all", null); - - assertThat(indexService.getMetaData().getAliases().containsKey("cats"), equalTo(true)); - assertThat(indexService.getMetaData().getAliases().containsKey("dogs"), equalTo(true)); - assertThat(indexService.getMetaData().getAliases().containsKey("turtles"), equalTo(false)); - - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats").toString(), equalTo("animal:cat")); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats", "dogs").toString(), equalTo("animal:cat animal:dog")); - - // Non-filtering alias should turn off all filters because filters are ORed - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "all"), nullValue()); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats", "all"), nullValue()); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "all", "cats"), nullValue()); - - add(indexService, "cats", filter(termQuery("animal", "feline"))); - add(indexService, "dogs", filter(termQuery("animal", "canine"))); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:canine animal:feline")); - } - - public void testAliasFilters() throws Exception { - IndexService indexService = createIndex("test", Settings.EMPTY); - - add(indexService, "cats", filter(termQuery("animal", "cat"))); - add(indexService, "dogs", filter(termQuery("animal", "dog"))); - - assertThat(indexService.aliasFilter(indexService.newQueryShardContext()), nullValue()); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs").toString(), equalTo("animal:dog")); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:dog animal:cat")); - - add(indexService, "cats", filter(termQuery("animal", "feline"))); - add(indexService, "dogs", filter(termQuery("animal", "canine"))); - - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:canine animal:feline")); - } - - public void testRemovedAliasFilter() throws Exception { - IndexService indexService = createIndex("test", Settings.EMPTY); - - add(indexService, "cats", filter(termQuery("animal", "cat"))); - remove(indexService, "cats"); - try { - indexService.aliasFilter(indexService.newQueryShardContext(), "cats"); - fail("Expected InvalidAliasNameException"); - } catch (InvalidAliasNameException e) { - assertThat(e.getMessage(), containsString("Invalid alias name [cats]")); - } - } - - public void testUnknownAliasFilter() throws Exception { - IndexService indexService = createIndex("test", Settings.EMPTY); - - add(indexService, "cats", filter(termQuery("animal", "cat"))); - add(indexService, "dogs", filter(termQuery("animal", "dog"))); - - try { - indexService.aliasFilter(indexService.newQueryShardContext(), "unknown"); - fail(); - } catch (InvalidAliasNameException e) { - // all is well - } - } - - private void remove(IndexService service, String alias) { - IndexMetaData build = IndexMetaData.builder(service.getMetaData()).removeAlias(alias).build(); - service.updateMetaData(build); - } - - private void add(IndexService service, String alias, @Nullable CompressedXContent filter) { - IndexMetaData build = IndexMetaData.builder(service.getMetaData()).putAlias(AliasMetaData.builder(alias).filter(filter).build()).build(); - service.updateMetaData(build); - } - public void testBaseAsyncTask() throws 
InterruptedException, IOException { IndexService indexService = createIndex("test", Settings.EMPTY); AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1)); diff --git a/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index 2a961d58928..76d3bfbc484 100644 --- a/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.ScriptService; @@ -81,8 +82,8 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase { } @Override - public String[] filteringAliases() { - return new String[0]; + public QueryBuilder filteringAliases() { + return null; } @Override diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index 7c4acb44039..8226d18239a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -84,7 +84,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { assertThat(doc.rootDoc().getField("point.lon"), notNullValue()); assertThat(doc.rootDoc().getField("point.lon").fieldType().stored(), is(stored)); assertThat(doc.rootDoc().getField("point.geohash"), nullValue()); - if (indexCreatedBefore22 == true) { + if (indexCreatedBefore22) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java new file mode 100644 index 00000000000..f4e83dde46a --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +public class MapperTests extends ESTestCase { + + public void testSuccessfulBuilderContext() { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + ContentPath contentPath = new ContentPath(1); + Mapper.BuilderContext context = new Mapper.BuilderContext(settings, contentPath); + + assertEquals(settings, context.indexSettings()); + assertEquals(contentPath, context.path()); + } + + public void testBuilderContextWithIndexSettingsAsNull() { + expectThrows(NullPointerException.class, () -> new Mapper.BuilderContext(null, new ContentPath(1))); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index 88d22de6e27..2454150be56 100644 --- a/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -101,7 +101,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] {2, 3}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, + new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -115,7 +116,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] {200, 30}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, + new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -132,7 +134,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz") }; float[] boosts = new float[] {2}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, + new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -154,7 +157,8 @@ .add(expectedClause1, Occur.SHOULD) .add(expectedClause2, Occur.SHOULD) .build(); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, + new FieldAndFieldType(ft1, 2), new
FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } diff --git a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java index 14b1b7c1b5e..8682d8127ae 100644 --- a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java @@ -23,10 +23,10 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.InvalidIndexNameException; @@ -34,6 +34,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.ArrayList; import java.util.Collection; @@ -47,7 +49,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicIntegerArray; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -57,6 +58,8 @@ public class IndexActionIT extends ESIntegTestCase { * This test tries to simulate load while creating an index and indexing documents * while the index is being created. */ + + @TestLogging("_root:DEBUG,org.elasticsearch.index.shard.IndexShard:TRACE,org.elasticsearch.action.search:TRACE") public void testAutoGenerateIdNoDuplicates() throws Exception { int numberOfIterations = scaledRandomIntBetween(10, 50); for (int i = 0; i < numberOfIterations; i++) { @@ -66,7 +69,7 @@ logger.info("indexing [{}] docs", numOfDocs); List<IndexRequestBuilder> builders = new ArrayList<>(numOfDocs); for (int j = 0; j < numOfDocs; j++) { - builders.add(client().prepareIndex("test", "type").setSource("field", "value")); + builders.add(client().prepareIndex("test", "type").setSource("field", "value_" + j)); } indexRandom(true, builders); logger.info("verifying indexed content"); @@ -74,7 +77,13 @@ for (int j = 0; j < numOfChecks; j++) { try { logger.debug("running search with all types"); - assertHitCount(client().prepareSearch("test").get(), numOfDocs); + SearchResponse response = client().prepareSearch("test").get(); + if (response.getHits().totalHits() != numOfDocs) { + final String message = "Count is " + response.getHits().totalHits() + " but " + numOfDocs + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}.
search response: \n{}", message, response); + fail(message); + } } catch (Exception e) { logger.error("search for all docs types failed", e); if (firstError == null) { @@ -83,7 +92,13 @@ public class IndexActionIT extends ESIntegTestCase { } try { logger.debug("running search with a specific type"); - assertHitCount(client().prepareSearch("test").setTypes("type").get(), numOfDocs); + SearchResponse response = client().prepareSearch("test").setTypes("type").get(); + if (response.getHits().totalHits() != numOfDocs) { + final String message = "Count is " + response.getHits().totalHits() + " but " + numOfDocs + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}. search response: \n{}", message, response); + fail(message); + } } catch (Exception e) { logger.error("search for all docs of a specific type failed", e); if (firstError == null) { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 078bf499ff4..0803a788e8a 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -19,9 +19,11 @@ package org.elasticsearch.indices; +import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -30,6 +32,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.chrono.ISOChronology; +import org.joda.time.format.DateTimeFormat; import java.util.List; @@ -441,4 +444,55 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { equalTo(5L)); } + public void testCacheWithFilteredAlias() { + assertAcked(client().admin().indices().prepareCreate("index").addMapping("type", "created_at", "type=date") + .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true, IndexMetaData.SETTING_NUMBER_OF_SHARDS, + 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .addAlias(new Alias("last_week").filter(QueryBuilders.rangeQuery("created_at").gte("now-7d/d"))) + .get()); + DateTime now = new DateTime(DateTimeZone.UTC); + client().prepareIndex("index", "type", "1").setRouting("1").setSource("created_at", + DateTimeFormat.forPattern("YYYY-MM-dd").print(now)).get(); + refresh(); + + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(0L)); + + SearchResponse r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + 
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + r1 = client().prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(2L)); + + r1 = client().prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(2L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(2L)); + } + } diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index acf5c26e565..b9426b83e66 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -317,7 +317,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); int numIndexRequests = 0; for (int i = 0; i < numRequest; i++) { - ActionRequest request; + DocWriteRequest request; if (randomBoolean()) { if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); diff --git a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java index d8cf1e7b5ec..2490134db4e 100644 --- a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.routing; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -259,7 +260,7 @@ public class SimpleRoutingIT extends 
ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo("index")); + assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -280,7 +281,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo("update")); + assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -301,7 +302,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo("delete")); + assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.DELETE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); diff --git a/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java index e98b469f955..c29822990f0 100644 --- a/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.script.Script; @@ -68,6 +69,7 @@ public abstract class AbstractSearchTestCase extends ESTestCase { protected NamedWriteableRegistry namedWriteableRegistry; protected SearchRequestParsers searchRequestParsers; private TestSearchExtPlugin searchExtPlugin; + protected IndicesQueriesRegistry queriesRegistry; public void setUp() throws Exception { super.setUp(); @@ -79,6 +81,7 @@ public abstract class AbstractSearchTestCase extends ESTestCase { entries.addAll(searchModule.getNamedWriteables()); namedWriteableRegistry = new NamedWriteableRegistry(entries); searchRequestParsers = searchModule.getSearchRequestParsers(); + queriesRegistry = searchModule.getQueryParserRegistry(); } protected SearchSourceBuilder createSearchSourceBuilder() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 7de8f6a4988..0def6726e03 100644 --- 
a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -41,6 +42,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.ShardFetchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -173,7 +175,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase { try { QuerySearchResultProvider querySearchResultProvider = service.executeQueryPhase( new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - new SearchSourceBuilder(), new String[0], false)); + new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY))); IntArrayList intCursors = new IntArrayList(1); intCursors.add(0); ShardFetchRequest req = new ShardFetchRequest(querySearchResultProvider.id(), intCursors, null /* not a scroll */); diff --git a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index 452b6b6ba3a..8c501d71e0a 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -19,21 +19,51 @@ package org.elasticsearch.search.internal; +import org.elasticsearch.Version; +import org.elasticsearch.action.ShardValidateQueryRequestTests; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import 
org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.RandomQueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.InvalidAliasNameException; import org.elasticsearch.search.AbstractSearchTestCase; import java.io.IOException; +import java.util.Base64; +import java.util.function.Function; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { + private IndexMetaData baseMetaData = IndexMetaData.builder("test").settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()) + .numberOfShards(1).numberOfReplicas(1).build(); public void testSerialization() throws Exception { ShardSearchTransportRequest shardSearchTransportRequest = createShardSearchTransportRequest(); @@ -43,7 +73,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { ShardSearchTransportRequest deserializedRequest = new ShardSearchTransportRequest(); deserializedRequest.readFrom(in); assertEquals(deserializedRequest.scroll(), shardSearchTransportRequest.scroll()); - assertArrayEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases()); + assertEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases()); assertArrayEquals(deserializedRequest.indices(), shardSearchTransportRequest.indices()); assertArrayEquals(deserializedRequest.types(), shardSearchTransportRequest.types()); assertEquals(deserializedRequest.indicesOptions(), shardSearchTransportRequest.indicesOptions()); @@ -55,6 +85,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { assertEquals(deserializedRequest.numberOfShards(), shardSearchTransportRequest.numberOfShards()); assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey()); assertNotSame(deserializedRequest, shardSearchTransportRequest); + assertEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases()); } } } @@ -64,13 +95,129 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { ShardId shardId = new ShardId(randomAsciiOfLengthBetween(2, 10), randomAsciiOfLengthBetween(2, 10), randomInt()); ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "reason")); - String[] filteringAliases; + final AliasFilter filteringAliases; if (randomBoolean()) { - filteringAliases = generateRandomStringArray(10, 10, false, false); + String[] strings = generateRandomStringArray(10, 10, false, false); + filteringAliases = new AliasFilter(RandomQueryBuilder.createQuery(random()), strings); } else { - filteringAliases = Strings.EMPTY_ARRAY; + filteringAliases = new AliasFilter(null, Strings.EMPTY_ARRAY); } return new ShardSearchTransportRequest(searchRequest, shardRouting, randomIntBetween(1, 100), filteringAliases, Math.abs(randomLong())); } + + public void testFilteringAliases() throws Exception { + IndexMetaData indexMetaData = baseMetaData; + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat"))); + indexMetaData = add(indexMetaData, "dogs", 
filter(termQuery("animal", "dog"))); + indexMetaData = add(indexMetaData, "all", null); + + assertThat(indexMetaData.getAliases().containsKey("cats"), equalTo(true)); + assertThat(indexMetaData.getAliases().containsKey("dogs"), equalTo(true)); + assertThat(indexMetaData.getAliases().containsKey("turtles"), equalTo(false)); + + assertEquals(aliasFilter(indexMetaData, "cats"), QueryBuilders.termQuery("animal", "cat")); + assertEquals(aliasFilter(indexMetaData, "cats", "dogs"), QueryBuilders.boolQuery().should(QueryBuilders.termQuery("animal", "cat")) + .should(QueryBuilders.termQuery("animal", "dog"))); + + // Non-filtering alias should turn off all filters because filters are ORed + assertThat(aliasFilter(indexMetaData,"all"), nullValue()); + assertThat(aliasFilter(indexMetaData, "cats", "all"), nullValue()); + assertThat(aliasFilter(indexMetaData, "all", "cats"), nullValue()); + + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "feline"))); + indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "canine"))); + assertEquals(aliasFilter(indexMetaData, "dogs", "cats"),QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("animal", "canine")) + .should(QueryBuilders.termQuery("animal", "feline"))); + } + + public void testRemovedAliasFilter() throws Exception { + IndexMetaData indexMetaData = baseMetaData; + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat"))); + indexMetaData = remove(indexMetaData, "cats"); + try { + aliasFilter(indexMetaData, "cats"); + fail("Expected InvalidAliasNameException"); + } catch (InvalidAliasNameException e) { + assertThat(e.getMessage(), containsString("Invalid alias name [cats]")); + } + } + + public void testUnknownAliasFilter() throws Exception { + IndexMetaData indexMetaData = baseMetaData; + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat"))); + indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "dog"))); + IndexMetaData finalIndexMetadata = indexMetaData; + expectThrows(InvalidAliasNameException.class, () -> aliasFilter(finalIndexMetadata, "unknown")); + } + + public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.close(); + return new CompressedXContent(builder.string()); + } + + private IndexMetaData remove(IndexMetaData indexMetaData, String alias) { + IndexMetaData build = IndexMetaData.builder(indexMetaData).removeAlias(alias).build(); + return build; + } + + private IndexMetaData add(IndexMetaData indexMetaData, String alias, @Nullable CompressedXContent filter) { + return IndexMetaData.builder(indexMetaData).putAlias(AliasMetaData.builder(alias).filter(filter).build()).build(); + } + + public QueryBuilder aliasFilter(IndexMetaData indexMetaData, String... 
aliasNames) { + Function contextFactory = (p) -> new QueryParseContext(queriesRegistry, + p, new ParseFieldMatcher(Settings.EMPTY)); + return ShardSearchRequest.parseAliasFilter(contextFactory, indexMetaData, aliasNames); + } + + // BWC test for changes from #20916 + public void testSerialize50Request() throws IOException { + BytesArray requestBytes = new BytesArray(Base64.getDecoder() + // this is a base64 encoded request generated with the same input + .decode("AAh4cXptdEhJcgdnT0d1ZldWyfL/sgQBJAHkDAMBAAIBAQ4TWlljWlZ5TkVmRU5xQnFQVHBjVBRZbUpod2pRV2dDSXVxRXpRaEdGVBRFZWFJY0plT2hn" + + "UEpISFhmSXR6Qw5XZ1hQcmFidWhWalFSQghuUWNwZ2JjQxBtZldRREJPaGF3UnlQSE56EVhQSUtRa25Iekh3bU5kbGVECWlFT2NIeEh3RgZIYXpMTWgUeGJq" + + "VU9Tdkdua3RORU5QZkNrb1EOalRyWGh5WXhvZ3plV2UUcWlXZFl2eUFUSXdPVGdMUUtYTHAJU3RKR3JxQkVJEkdEQ01xUHpnWWNaT3N3U3prSRIUeURlVFpM" + + "Q1lBZERZcWpDb3NOVWIST1NyQlZtdUNrd0F1UXRvdVRjEGp6RlVMd1dqc3VtUVNaTk0JT3N2cnpLQ3ZLBmRpS1J6cgdYbmVhZnBxBUlTUU9pEEJMcm1ERXVs" + + "eXhESlBoVkgTaWdUUmtVZGh4d0FFc2ZKRm9ZahNrb01XTnFFd2NWSVVDU3pWS2xBC3JVTWV3V2tUUWJUE3VGQU1Hd21CYUFMTmNQZkxobXUIZ3dxWHBxWXcF" + + "bmNDZUEOTFBSTEpYZVF6Z3d2eE0PV1BucUFacll6WWRxa1hCDGxkbXNMaVRzcUZXbAtSY0NsY3FNdlJQcv8BAP////8PAQAAARQAAQp5THlIcHdQeGtMAAAB" + + "AQAAAAEDbkVLAQMBCgACAAADAQABAAAAAQhIc25wRGxQbwEBQgABAAACAQMAAAEIAAAJMF9OSG9kSmh2HwABAwljRW5MVWxFbVQFemlxWG8KcXZQTkRUUGJk" + + "bgECCkpMbXVMT1dtVnkISEdUUHhsd0cBAAEJAAABA2lkcz+rKsUAAAAAAAAAAAECAQYAAgwxX0ZlRWxSQkhzQ07/////DwABAAEDCnRyYXFHR1hjVHkKTERY" + + "aE1HRWVySghuSWtzbEtXUwABCgEHSlRwQnhwdwAAAQECAgAAAAAAAQcyX3FlYmNDGQEEBklxZU9iUQdTc01Gek5YCWlMd2xuamNRQwNiVncAAUHt61kAAQR0" + + "ZXJtP4AAAAANbUtDSnpHU3lidm5KUBUMaVpqeG9vcm5QSFlvAAEBLGdtcWxuRWpWTXdvTlhMSHh0RWlFdHBnbEF1cUNmVmhoUVlwRFZxVllnWWV1A2ZvbwEA" + + "AQhwYWlubGVzc/8AALk4AAAAAAABAAAAAAAAAwpKU09PU0ZmWnhFClVqTGxMa2p3V2gKdUJwZ3R3dXFER5Hg97uT7MOmPgEADw")); + try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { + in.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + ShardSearchTransportRequest readRequest = new ShardSearchTransportRequest(); + readRequest.readFrom(in); + assertEquals(0, in.available()); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> readRequest.filteringAliases()); + assertEquals("alias filter for aliases: [JSOOSFfZxE, UjLlLkjwWh, uBpgtwuqDG] must be rewritten first", + illegalStateException.getMessage()); + IndexMetaData.Builder indexMetadata = new IndexMetaData.Builder(baseMetaData) + .putAlias(AliasMetaData.newAliasMetaDataBuilder("JSOOSFfZxE").filter("{\"term\" : {\"foo\" : \"bar\"}}")) + .putAlias(AliasMetaData.newAliasMetaDataBuilder("UjLlLkjwWh").filter("{\"term\" : {\"foo\" : \"bar1\"}}")) + .putAlias(AliasMetaData.newAliasMetaDataBuilder("uBpgtwuqDG").filter("{\"term\" : {\"foo\" : \"bar2\"}}")); + IndexSettings indexSettings = new IndexSettings(indexMetadata.build(), Settings.EMPTY); + final long nowInMillis = randomPositiveLong(); + QueryShardContext context = new QueryShardContext( + 0, indexSettings, null, null, null, null, null, queriesRegistry, null, null, null, + () -> nowInMillis); + readRequest.rewrite(context); + QueryBuilder queryBuilder = readRequest.filteringAliases(); + assertEquals(queryBuilder, QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("foo", "bar")) + .should(QueryBuilders.termQuery("foo", "bar1")) + .should(QueryBuilders.termQuery("foo", "bar2")) + ); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + 
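// round-trip: writing the request back out with the 5.0.0 wire format should reproduce the original bytes +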
readRequest.writeTo(output); + assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); + } + } + } diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index 693fffa307a..1a10a700948 100644 --- a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -122,6 +122,13 @@ public class MultiMatchQueryIT extends ESIntegTestCase { "last_name", "", "category", "marvel hero", "skill", 1)); + + builders.add(client().prepareIndex("test", "test", "nowHero").setSource( + "full_name", "now sort of", + "first_name", "now", + "last_name", "", + "category", "marvel hero", + "skill", 1)); List firstNames = new ArrayList<>(); fill(firstNames, "Captain", between(15, 25)); fill(firstNames, "Ultimate", between(5, 10)); @@ -164,6 +171,9 @@ public class MultiMatchQueryIT extends ESIntegTestCase { .field("norms", false) .field("copy_to", "last_name_phrase") .endObject() + .startObject("date") + .field("type", "date") + .endObject() .endObject() .endObject().endObject(); } @@ -633,6 +643,52 @@ public class MultiMatchQueryIT extends ESIntegTestCase { .lenient(true))).get(); assertHitCount(searchResponse, 1L); assertFirstHit(searchResponse, hasId("ultimate1")); + + + // Check that cross_fields works with date fields + searchResponse = client().prepareSearch("test") + .setQuery(randomizeType(multiMatchQuery("now", "f*", "date") + .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).lenient(true)) + .get(); + assertHitCount(searchResponse, 1L); + assertFirstHit(searchResponse, hasId("nowHero")); + } + + /** + * Test for an edge case where field level boosting is applied to a field that doesn't exist in documents on + * one shard. There was an issue reported in https://github.com/elastic/elasticsearch/issues/18710 where a + * `multi_match` query using the fuzziness parameter with a boost on one of two fields returns the + * same document score if both documents are placed on different shards. This test recreates that scenario + * and checks that the returned scores are different. 
+ */ + public void testFuzzyFieldLevelBoosting() throws InterruptedException, ExecutionException { + String idx = "test18710"; + CreateIndexRequestBuilder builder = prepareCreate(idx).setSettings(Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, 3) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + ); + assertAcked(builder.addMapping("type", "title", "type=string", "body", "type=string")); + ensureGreen(); + List builders = new ArrayList<>(); + builders.add(client().prepareIndex(idx, "type", "1").setSource( + "title", "foo", + "body", "bar")); + builders.add(client().prepareIndex(idx, "type", "2").setSource( + "title", "bar", + "body", "foo")); + indexRandom(true, false, builders); + + SearchResponse searchResponse = client().prepareSearch(idx) + .setExplain(true) + .setQuery(multiMatchQuery("foo").field("title", 100).field("body") + .fuzziness(0) + ).get(); + SearchHit[] hits = searchResponse.getHits().getHits(); + assertNotEquals("both documents should be on different shards", hits[0].getShard().getShardId(), hits[1].getShard().getShardId()); + assertEquals("1", hits[0].getId()); + assertEquals("2", hits[1].getId()); + assertThat(hits[0].getScore(), greaterThan(hits[1].score())); } private static void assertEquivalent(String query, SearchResponse left, SearchResponse right) { diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index fb55f5bb767..200ec6ac4b1 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -715,6 +715,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas .put("location", repositoryLocation) .put("random", randomAsciiOfLength(10)) .put("use_lucene_corruption", true) + .put("max_failure_number", 10000000L) .put("random_data_file_io_exception_rate", 1.0))); // Test restore after index deletion diff --git a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java index 20c82e6f518..eeebe8cbcdc 100644 --- a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java +++ b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java @@ -257,7 +257,7 @@ public class RandomShapeGenerator extends RandomGeoGenerator { if (nearP == null) nearP = xRandomPointIn(r, bounds); - if (small == true) { + if (small) { // between 3 and 6 degrees final double latRange = 3 * r.nextDouble() + 3; final double lonRange = 3 * r.nextDouble() + 3; diff --git a/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java b/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java index 48ea8b6c8c9..5ec0f30f520 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java @@ -33,7 +33,7 @@ public class FixedThreadPoolTests extends ESThreadPoolTestCase { final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED); // some of the fixed thread pool are bound by the number of // cores so we can not exceed that - final int size = randomIntBetween(1, EsExecutors.boundedNumberOfProcessors(Settings.EMPTY)); + final int size = randomIntBetween(1, EsExecutors.numberOfProcessors(Settings.EMPTY)); final int queueSize = randomIntBetween(1, 16); 
final long rejections = randomIntBetween(1, 16); diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index 87accf057ad..29053400931 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -62,7 +62,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { public void testIndexingThreadPoolsMaxSize() throws InterruptedException { final String name = randomFrom(Names.BULK, Names.INDEX); - final int maxSize = 1 + EsExecutors.boundedNumberOfProcessors(Settings.EMPTY); + final int maxSize = 1 + EsExecutors.numberOfProcessors(Settings.EMPTY); final int tooBig = randomIntBetween(1 + maxSize, Integer.MAX_VALUE); // try to create a too big thread pool @@ -89,7 +89,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { private static int getExpectedThreadPoolSize(Settings settings, String name, int size) { if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) { - return Math.min(size, EsExecutors.boundedNumberOfProcessors(settings)); + return Math.min(size, EsExecutors.numberOfProcessors(settings)); } else { return size; } @@ -185,7 +185,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { new ScalingExecutorBuilder( "my_pool1", 1, - EsExecutors.boundedNumberOfProcessors(Settings.EMPTY), + EsExecutors.numberOfProcessors(Settings.EMPTY), TimeValue.timeValueMinutes(1)); final FixedExecutorBuilder fixed = new FixedExecutorBuilder(Settings.EMPTY, "my_pool2", 1, 1); diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 3aaaafe5fa4..ccb392be1fc 100644 --- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -21,9 +21,9 @@ package org.elasticsearch.versioning; import org.apache.lucene.util.TestUtil; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.lucene.uid.Versions; @@ -686,7 +686,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { client() .prepareIndex("test", "type", "id") .setSource("foo", "bar") - .setOpType(IndexRequest.OpType.INDEX) + .setOpType(DocWriteRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) .execute() @@ -755,7 +755,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { client() .prepareIndex("test", "type", "id") .setSource("foo", "bar") - .setOpType(IndexRequest.OpType.INDEX) + .setOpType(DocWriteRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) .execute() diff --git a/docs/build.gradle b/docs/build.gradle index 5a65065a639..3286648da96 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -93,9 +93,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/analysis/tokenfilters/stop-tokenfilter.asciidoc', 
'reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc', 'reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc', - 'reference/cat/master.asciidoc', - 'reference/cat/nodeattrs.asciidoc', - 'reference/cat/nodes.asciidoc', 'reference/cat/pending_tasks.asciidoc', 'reference/cat/plugins.asciidoc', 'reference/cat/recovery.asciidoc', diff --git a/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc b/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc index e49962a58f7..ef91d0b7000 100644 --- a/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc +++ b/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc @@ -1,5 +1,5 @@ [[java-aggs-metrics-geobounds]] -==== Cardinality Aggregation +==== Geo Bounds Aggregation Here is how you can use {ref}/search-aggregations-metrics-geobounds-aggregation.html[Geo Bounds Aggregation] diff --git a/docs/reference/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc index 915a3c6a389..fa89314a230 100644 --- a/docs/reference/aggregations/bucket/children-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/children-aggregation.asciidoc @@ -54,7 +54,7 @@ PUT child_example/answer/1?parent=1&refresh "display_name": "Sam", "id": 48 }, - "body": "
<p>Unfortunately your pretty much limited to FTP...", + "body": "<p>
    Unfortunately you're pretty much limited to FTP...", "creation_date": "2009-05-04T13:45:37.030" } PUT child_example/answer/2?parent=1&refresh diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 91965f7b6fc..11dfb3c7c6a 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -67,6 +67,7 @@ GET /_cat/indices?v&s=store.size:desc -------------------------------------------------- // CONSOLE // TEST[continued] +// TEST[s/^/POST _flush\n/] Which looks like: diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc index caed564d7b5..cf203a3eee0 100644 --- a/docs/reference/cat/master.asciidoc +++ b/docs/reference/cat/master.asciidoc @@ -2,14 +2,22 @@ == cat master `master` doesn't have any extra options. It simply displays the -master's node ID, bound IP address, and node name. +master's node ID, bound IP address, and node name. For example: -[source,sh] +[source,js] -------------------------------------------------- -% curl 'localhost:9200/_cat/master?v' -id ip node -Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA +GET /_cat/master?v -------------------------------------------------- +// CONSOLE + +might respond: + +[source,js] +-------------------------------------------------- +id host ip node +YzWoH_2BT-6UjVGDyPdqYg 127.0.0.1 127.0.0.1 YzWoH_2 +-------------------------------------------------- +// TESTRESPONSE[s/YzWoH_2.+/.+/ _cat] This information is also available via the `nodes` command, but this is slightly shorter when all you want to do, for example, is verify @@ -25,3 +33,4 @@ Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA [3] 19:16:37 [SUCCESS] es1.vm Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA -------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc index 1677583a709..a7cbecb50db 100644 --- a/docs/reference/cat/nodeattrs.asciidoc +++ b/docs/reference/cat/nodeattrs.asciidoc @@ -2,34 +2,26 @@ == cat nodeattrs The `nodeattrs` command shows custom node attributes. +For example: -["source","sh",subs="attributes,callouts"] +[source,js] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/nodeattrs -node host ip attr value -DKDM97B epsilon 192.168.1.8 rack rack314 -DKDM97B epsilon 192.168.1.8 azone us-east-1 +GET /_cat/nodeattrs?v -------------------------------------------------- +// CONSOLE -The first few columns give you basic info per node. +Could look like: - -["source","sh",subs="attributes,callouts"] +[source,js] -------------------------------------------------- -node host ip -DKDM97B epsilon 192.168.1.8 -DKDM97B epsilon 192.168.1.8 +node host ip attr value +EK_AsJb 127.0.0.1 127.0.0.1 testattr test -------------------------------------------------- +// TESTRESPONSE[s/EK_AsJb/.+/ _cat] - -The attr and value columns can give you a picture of custom node attributes. - -[source,sh] --------------------------------------------------- -attr value -rack rack314 -azone us-east-1 --------------------------------------------------- +The first few columns (`node`, `host`, `ip`) give you basic info per node +and the `attr` and `value` columns give you the custom node attributes, +one per line. [float] === Columns @@ -49,13 +41,20 @@ by default. To have the headers appear in the output, use verbose mode (`v`). The header name will match the supplied value (e.g., `pid` versus `p`). 
For example: -["source","sh",subs="attributes,callouts"] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/nodeattrs?v&h=name,pid,attr,value -name pid attr value -DKDM97B 28000 rack rack314 -DKDM97B 28000 azone us-east-1 +GET /_cat/nodeattrs?v&h=name,pid,attr,value -------------------------------------------------- +// CONSOLE + +Might look like: + +[source,js] +-------------------------------------------------- +name pid attr value +EK_AsJb 19566 testattr test +-------------------------------------------------- +// TESTRESPONSE[s/EK_AsJb/.+/ s/19566/\\d*/ _cat] [cols="<,<,<,<,<",options="header",subs="normal"] |======================================================================= diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index b0b152d4c50..8885e490fca 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -1,38 +1,31 @@ [[cat-nodes]] == cat nodes -The `nodes` command shows the cluster topology. +The `nodes` command shows the cluster topology. For example: -[source,sh] +[source,js] -------------------------------------------------- -% GET /_cat/nodes -192.168.56.30 9 78 22 1.80 2.05 2.51 mdi * bGG90GE -192.168.56.10 6 75 14 1.24 2.45 1.37 md - I8hydUG -192.168.56.20 5 71 12 1.07 1.05 1.11 di - H5dfFeA +GET /_cat/nodes?v -------------------------------------------------- +// CONSOLE -The first few columns tell you where your nodes live and give -a picture of your heap, memory, cpu and load. +Might look like: -[source,sh] +[source,js] -------------------------------------------------- -ip heap.percent ram.percent cpu load_1m load_5m load_15m -192.168.56.30 9 78 22 1.80 2.05 2.51 -192.168.56.10 6 75 14 1.24 2.45 1.37 -192.168.56.20 5 71 12 1.07 1.05 1.11 +ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name +127.0.0.1 65 99 42 3.07 mdi * mJw06l1 -------------------------------------------------- +// TESTRESPONSE[s/3.07/(\\d+\\.\\d+( \\d+\\.\\d+ (\\d+\\.\\d+)?)?)?/] +// TESTRESPONSE[s/65 99 42/\\d+ \\d+ \\d+/] +// TESTRESPONSE[s/[*]/[*]/ s/mJw06l1/.+/ _cat] -The last columns provide ancillary information that can often be -useful when looking at the cluster as a whole, particularly large -ones. How many master-eligible nodes do I have? +The first few columns (`ip`, `heap.percent`, `ram.percent`, `cpu`, `load_*`) tell +you where your nodes live and give a quick picture of performance stats. -[source,sh] --------------------------------------------------- -node.role master name -mdi * bGG90GE -md - I8hydUG -di - H5dfFeA --------------------------------------------------- +The last columns (`node.role`, `master`, and `name`) provide ancillary +information that can often be useful when looking at the cluster as a whole, +particularly large ones. How many master-eligible nodes do I have? [float] === Columns @@ -52,18 +45,20 @@ by default. To have the headers appear in the output, use verbose mode (`v`). The header name will match the supplied value (e.g., `pid` versus `p`). 
For example: -[source,sh] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/nodes?v&h=id,ip,port,v,m -id ip port v m -pLSN 192.168.56.30 9300 {version} - -k0zy 192.168.56.10 9300 {version} - -6Tyi 192.168.56.20 9300 {version} * -% curl 192.168.56.10:9200/_cat/nodes?h=id,ip,port,v,m -pLSN 192.168.56.30 9300 {version} - -k0zy 192.168.56.10 9300 {version} - -6Tyi 192.168.56.20 9300 {version} * +GET /_cat/nodes?v&h=id,ip,port,v,m -------------------------------------------------- +// CONSOLE + +Might look like: + +["source","js",subs="attributes,callouts"] +-------------------------------------------------- +id ip port v m +veJR 127.0.0.1 59938 {version} * +-------------------------------------------------- +// TESTRESPONSE[s/veJR/.+/ s/59938/\\d+/ s/[*]/[*]/ _cat] [cols="<,<,<,<,<",options="header",subs="normal"] |======================================================================= diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index 4fc4182505b..b2d4bb562ea 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -325,7 +325,7 @@ of configured copies per shard in the index (which is `number_of_replicas+1`). Specifying a negative value or a number greater than the number of shard copies will throw an error. -For example, suppose we have a cluster of three nodes, `A, `B`, and `C` and +For example, suppose we have a cluster of three nodes, `A`, `B`, and `C` and we create an index `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If we attempt an indexing operation, by default the operation will only ensure diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 4d74500d68e..aa6846d1e8a 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -1,9 +1,20 @@ [[modules-snapshots]] == Snapshot And Restore -The snapshot and restore module allows to create snapshots of individual indices or an entire cluster into a remote -repository. At the time of the initial release only shared file system repository was supported, but now a range of -backends are available via officially supported repository plugins. +The snapshot and restore module allows you to create snapshots of individual +indices or an entire cluster into a remote repository like a shared file system, +S3, or HDFS. These snapshots are great for backups because they can be restored +relatively quickly but they are not archival because they can only be restored +to versions of Elasticsearch that can read the index. That means that: + +* A snapshot of an index created in 2.x can be restored to 5.x. +* A snapshot of an index created in 1.x can be restored to 2.x. +* A snapshot of an index created in 1.x can **not** be restored to 5.x. + +To restore a snapshot of an index created in 1.x to 5.x you can restore it to +a 2.x cluster and use <> to rebuild +the index in a 5.x cluster. This is as time-consuming as restoring from +archival copies of the original data. [float] === Repositories @@ -516,5 +527,3 @@ well as the global metadata were readable. The restore operation requires the gl the index level blocks are ignored during restore because indices are essentially recreated during restore. 
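For reference, a minimal registration of the shared file system (`fs`) repository type mentioned above; the `my_backup` name and the `location` path are illustrative, and the path must be whitelisted via the `path.repo` setting:

[source,js]
--------------------------------------------------
PUT /_snapshot/my_backup
{
  "type": "fs",
  "settings": {
    "location": "/mount/backups/my_backup"
  }
}
--------------------------------------------------
// NOTCONSOLE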
Please note that a repository content is not part of the cluster and therefore cluster blocks don't affect internal repository operations such as listing or deleting snapshots from an already registered repository. - - diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index a333312e0fb..4e8b5c61efd 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -118,14 +118,30 @@ thread_pool: [[processors]] === Processors setting The number of processors is automatically detected, and the thread pool -settings are automatically set based on it. Sometimes, the number of processors -are wrongly detected, in such cases, the number of processors can be -explicitly set using the `processors` setting. +settings are automatically set based on it. In some cases it can be +useful to override the number of detected processors. This can be done +by explicitly setting the `processors` setting. [source,yaml] -------------------------------------------------- processors: 2 -------------------------------------------------- +There are a few use-cases for explicitly overriding the `processors` +setting: + +. If you are running multiple instances of Elasticsearch on the same +host but want Elasticsearch to size its thread pools as if it only has a +fraction of the CPU, you should override the `processors` setting to the +desired fraction (e.g., if you're running two instances of Elasticsearch +on a 16-core machine, set `processors` to 8). Note that this is an +expert-level use-case and there's a lot more involved than just setting +the `processors` setting as there are other considerations like changing +the number of garbage collector threads, pinning processes to cores, +etc. +. Sometimes the number of processors is wrongly detected and in such +cases explicitly setting the `processors` setting will work around such +issues. + In order to check the number of processors detected, use the nodes info API with the `os` flag. diff --git a/docs/reference/setup/reindex_upgrade.asciidoc b/docs/reference/setup/reindex_upgrade.asciidoc index 8ee9c61e424..f9e7a60ee5b 100644 --- a/docs/reference/setup/reindex_upgrade.asciidoc +++ b/docs/reference/setup/reindex_upgrade.asciidoc @@ -19,6 +19,20 @@ If you are running an Elasticsearch 2.x cluster or older, you have two options: * Create a new 6.x cluster and use reindex-from-remote to import indices directly from the 2.x cluster. See <>. +.Time-based indices and retention periods +******************************************* + +For many use cases with time-based indices, you will not need to worry about +carrying old 2.x indices with you to 6.x. Data in time-based indices usually +becomes less interesting as time passes. Old indices can be deleted once they +fall outside of your retention period. + +Users in this position can continue to use 5.x until all old 2.x indices have +been deleted, then upgrade to 6.x directly. 
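For example, a minimal retention sketch (the `logs-2015.*` index pattern is
assumed purely for illustration):

[source,js]
--------------------------------------------------
DELETE /logs-2015.*
--------------------------------------------------
// NOTCONSOLE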
+ +******************************************* + + [[reindex-upgrade-inplace]] ==== Reindex in place diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java index 8419730dc1c..95bae3732e5 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java @@ -27,41 +27,75 @@ import com.github.mustachejava.Mustache; import com.github.mustachejava.MustacheException; import com.github.mustachejava.MustacheVisitor; import com.github.mustachejava.TemplateContext; +import com.github.mustachejava.codes.DefaultMustache; import com.github.mustachejava.codes.IterableCode; import com.github.mustachejava.codes.WriteCode; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.io.StringWriter; import java.io.Writer; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.StringJoiner; -import java.util.function.BiConsumer; import java.util.function.Function; +import java.util.function.Supplier; import java.util.regex.Matcher; import java.util.regex.Pattern; public class CustomMustacheFactory extends DefaultMustacheFactory { - private final BiConsumer encoder; + static final String CONTENT_TYPE_PARAM = "content_type"; - public CustomMustacheFactory(boolean escaping) { + static final String JSON_MIME_TYPE = "application/json"; + static final String PLAIN_TEXT_MIME_TYPE = "text/plain"; + static final String X_WWW_FORM_URLENCODED_MIME_TYPE = "application/x-www-form-urlencoded"; + + private static final String DEFAULT_MIME_TYPE = JSON_MIME_TYPE; + + private static final Map> ENCODERS; + static { + Map> encoders = new HashMap<>(); + encoders.put(JSON_MIME_TYPE, JsonEscapeEncoder::new); + encoders.put(PLAIN_TEXT_MIME_TYPE, DefaultEncoder::new); + encoders.put(X_WWW_FORM_URLENCODED_MIME_TYPE, UrlEncoder::new); + ENCODERS = Collections.unmodifiableMap(encoders); + } + + private final Encoder encoder; + + public CustomMustacheFactory(String mimeType) { super(); setObjectHandler(new CustomReflectionObjectHandler()); - if (escaping) { - this.encoder = new JsonEscapeEncoder(); - } else { - this.encoder = new NoEscapeEncoder(); - } + this.encoder = createEncoder(mimeType); + } + + public CustomMustacheFactory() { + this(DEFAULT_MIME_TYPE); } @Override public void encode(String value, Writer writer) { - encoder.accept(value, writer); + try { + encoder.encode(value, writer); + } catch (IOException e) { + throw new MustacheException("Unable to encode value", e); + } + } + + static Encoder createEncoder(String mimeType) { + Supplier supplier = ENCODERS.get(mimeType); + if (supplier == null) { + throw new IllegalArgumentException("No encoder found for MIME type [" + mimeType + "]"); + } + return supplier.get(); } @Override @@ -83,6 +117,8 @@ public class CustomMustacheFactory extends DefaultMustacheFactory { list.add(new JoinerCode(templateContext, df, mustache)); } else if (CustomJoinerCode.match(variable)) { list.add(new CustomJoinerCode(templateContext, df, mustache, variable)); + } else if (UrlEncoderCode.match(variable)) { + 
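// like {{join}}, {{url}} is matched as a pseudo-variable and dispatched to a dedicated Code that URL-encodes the output of its body +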
list.add(new UrlEncoderCode(templateContext, df, mustache, variable)); } else { list.add(new IterableCode(templateContext, df, mustache, variable)); } @@ -253,27 +289,85 @@ public class CustomMustacheFactory extends DefaultMustacheFactory { } } - class NoEscapeEncoder implements BiConsumer { + /** + * This function encodes a string using the {@link URLEncoder#encode(String, String)} method + * with the UTF-8 charset. + */ + static class UrlEncoderCode extends DefaultMustache { + + private static final String CODE = "url"; + private final Encoder encoder; + + public UrlEncoderCode(TemplateContext tc, DefaultMustacheFactory df, Mustache mustache, String variable) { + super(tc, df, mustache.getCodes(), variable); + this.encoder = new UrlEncoder(); + } @Override - public void accept(String s, Writer writer) { - try { - writer.write(s); - } catch (IOException e) { - throw new MustacheException("Failed to encode value: " + s); + public Writer run(Writer writer, List scopes) { + if (getCodes() != null) { + for (Code code : getCodes()) { + try (StringWriter capture = new StringWriter()) { + code.execute(capture, scopes); + + String s = capture.toString(); + if (s != null) { + encoder.encode(s, writer); + } + } catch (IOException e) { + throw new MustacheException("Exception while parsing mustache function at line " + tc.line(), e); + } + } } + return writer; + } + + static boolean match(String variable) { + return CODE.equalsIgnoreCase(variable); } } - class JsonEscapeEncoder implements BiConsumer { + @FunctionalInterface + interface Encoder { + /** + * Encodes the {@code s} string and writes it to the {@code writer} {@link Writer}. + * + * @param s The string to encode + * @param writer The {@link Writer} to which the encoded string will be written to + */ + void encode(final String s, final Writer writer) throws IOException; + } + + /** + * Encoder that simply writes the string to the writer without encoding. + */ + static class DefaultEncoder implements Encoder { @Override - public void accept(String s, Writer writer) { - try { - writer.write(JsonStringEncoder.getInstance().quoteAsString(s)); - } catch (IOException e) { - throw new MustacheException("Failed to escape and encode value: " + s); - } + public void encode(String s, Writer writer) throws IOException { + writer.write(s); + } + } + + /** + * Encoder that escapes JSON string values/fields. 
+ */ + static class JsonEscapeEncoder implements Encoder { + + @Override + public void encode(String s, Writer writer) throws IOException { + writer.write(JsonStringEncoder.getInstance().quoteAsString(s)); + } + } + + /** + * Encoder that escapes strings using HTML form encoding + */ + static class UrlEncoder implements Encoder { + + @Override + public void encode(String s, Writer writer) throws IOException { + writer.write(URLEncoder.encode(s, StandardCharsets.UTF_8.name())); } } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index b7d7087373c..b2bc514327c 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -43,6 +43,8 @@ import java.security.PrivilegedAction; import java.util.Collections; import java.util.Map; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.CONTENT_TYPE_PARAM; + /** * Main entry point handling template registration, compilation and * execution. @@ -55,10 +57,6 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme public static final String NAME = "mustache"; - static final String CONTENT_TYPE_PARAM = "content_type"; - static final String JSON_CONTENT_TYPE = "application/json"; - static final String PLAIN_TEXT_CONTENT_TYPE = "text/plain"; - /** Thread local UTF8StreamWriter to store template execution results in, thread local to save object creation.*/ private static ThreadLocal> utf8StreamWriter = new ThreadLocal<>(); @@ -91,13 +89,16 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme * */ @Override public Object compile(String templateName, String templateSource, Map params) { - final MustacheFactory factory = new CustomMustacheFactory(isJsonEscapingEnabled(params)); + final MustacheFactory factory = createMustacheFactory(params); Reader reader = new FastStringReader(templateSource); return factory.compile(reader, "query-template"); } - private boolean isJsonEscapingEnabled(Map params) { - return JSON_CONTENT_TYPE.equals(params.getOrDefault(CONTENT_TYPE_PARAM, JSON_CONTENT_TYPE)); + private CustomMustacheFactory createMustacheFactory(Map params) { + if (params == null || params.isEmpty() || params.containsKey(CONTENT_TYPE_PARAM) == false) { + return new CustomMustacheFactory(); + } + return new CustomMustacheFactory(params.get(CONTENT_TYPE_PARAM)); } @Override diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java new file mode 100644 index 00000000000..fefa98e8f86 --- /dev/null +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script.mustache; + +import com.github.mustachejava.Mustache; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.CompiledScript; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptEngineService; +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.script.ScriptService.ScriptType.INLINE; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.CONTENT_TYPE_PARAM; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.JSON_MIME_TYPE; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.PLAIN_TEXT_MIME_TYPE; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.X_WWW_FORM_URLENCODED_MIME_TYPE; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class CustomMustacheFactoryTests extends ESTestCase { + + public void testCreateEncoder() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> CustomMustacheFactory.createEncoder(null)); + assertThat(e.getMessage(), equalTo("No encoder found for MIME type [null]")); + + e = expectThrows(IllegalArgumentException.class, () -> CustomMustacheFactory.createEncoder("")); + assertThat(e.getMessage(), equalTo("No encoder found for MIME type []")); + + e = expectThrows(IllegalArgumentException.class, () -> CustomMustacheFactory.createEncoder("test")); + assertThat(e.getMessage(), equalTo("No encoder found for MIME type [test]")); + + assertThat(CustomMustacheFactory.createEncoder(CustomMustacheFactory.JSON_MIME_TYPE), + instanceOf(CustomMustacheFactory.JsonEscapeEncoder.class)); + assertThat(CustomMustacheFactory.createEncoder(CustomMustacheFactory.PLAIN_TEXT_MIME_TYPE), + instanceOf(CustomMustacheFactory.DefaultEncoder.class)); + assertThat(CustomMustacheFactory.createEncoder(CustomMustacheFactory.X_WWW_FORM_URLENCODED_MIME_TYPE), + instanceOf(CustomMustacheFactory.UrlEncoder.class)); + } + + public void testJsonEscapeEncoder() { + final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY); + final Map params = randomBoolean() ? 
singletonMap(CONTENT_TYPE_PARAM, JSON_MIME_TYPE) : emptyMap(); + + Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params); + CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script); + + ExecutableScript executable = engine.executable(compiled, singletonMap("value", "a \"value\"")); + BytesReference result = (BytesReference) executable.run(); + assertThat(result.utf8ToString(), equalTo("{\"field\": \"a \\\"value\\\"\"}")); + } + + public void testDefaultEncoder() { + final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY); + final Map params = singletonMap(CONTENT_TYPE_PARAM, PLAIN_TEXT_MIME_TYPE); + + Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params); + CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script); + + ExecutableScript executable = engine.executable(compiled, singletonMap("value", "a \"value\"")); + BytesReference result = (BytesReference) executable.run(); + assertThat(result.utf8ToString(), equalTo("{\"field\": \"a \"value\"\"}")); + } + + public void testUrlEncoder() { + final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY); + final Map params = singletonMap(CONTENT_TYPE_PARAM, X_WWW_FORM_URLENCODED_MIME_TYPE); + + Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params); + CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script); + + ExecutableScript executable = engine.executable(compiled, singletonMap("value", "tilde~ AND date:[2016 FROM*]")); + BytesReference result = (BytesReference) executable.run(); + assertThat(result.utf8ToString(), equalTo("{\"field\": \"tilde%7E+AND+date%3A%5B2016+FROM*%5D\"}")); + } +} diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java index 693ada174b9..b9f596e4d3d 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java @@ -49,7 +49,7 @@ public class MustacheScriptEngineTests extends ESTestCase { @Before public void setup() { qe = new MustacheScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - factory = new CustomMustacheFactory(true); + factory = new CustomMustacheFactory(); } public void testSimpleParameterReplace() { diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java index 9b48afe834a..becdda0e592 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java @@ -30,6 +30,8 @@ import org.elasticsearch.script.ScriptEngineService; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -43,8 +45,6 @@ import static java.util.Collections.singleton; import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static 
org.elasticsearch.script.ScriptService.ScriptType.INLINE; -import static org.elasticsearch.script.mustache.MustacheScriptEngineService.CONTENT_TYPE_PARAM; -import static org.elasticsearch.script.mustache.MustacheScriptEngineService.PLAIN_TEXT_CONTENT_TYPE; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -144,24 +144,6 @@ public class MustacheTests extends ESTestCase { assertThat(bytes.utf8ToString(), both(containsString("foo")).and(containsString("bar"))); } - public void testEscaping() { - // json string escaping enabled: - Mustache mustache = (Mustache) engine.compile(null, "{ \"field1\": \"{{value}}\"}", Collections.emptyMap()); - CompiledScript compiledScript = new CompiledScript(INLINE, "name", "mustache", mustache); - ExecutableScript executableScript = engine.executable(compiledScript, Collections.singletonMap("value", "a \"value\"")); - BytesReference rawResult = (BytesReference) executableScript.run(); - String result = rawResult.utf8ToString(); - assertThat(result, equalTo("{ \"field1\": \"a \\\"value\\\"\"}")); - - // json string escaping disabled: - mustache = (Mustache) engine.compile(null, "{ \"field1\": \"{{value}}\"}", - Collections.singletonMap(CONTENT_TYPE_PARAM, PLAIN_TEXT_CONTENT_TYPE)); - compiledScript = new CompiledScript(INLINE, "name", "mustache", mustache); - executableScript = engine.executable(compiledScript, Collections.singletonMap("value", "a \"value\"")); - rawResult = (BytesReference) executableScript.run(); - result = rawResult.utf8ToString(); - assertThat(result, equalTo("{ \"field1\": \"a \"value\"\"}")); - } public void testSizeAccessForCollectionsAndArrays() throws Exception { String[] randomArrayValues = generateRandomStringArray(10, 20, false); @@ -375,6 +357,44 @@ public class MustacheTests extends ESTestCase { assertScript("{{#join delimiter=' and '}}params{{/join delimiter=' and '}}", params, equalTo("1 and 2 and 3 and 4")); } + public void testUrlEncoder() { + Map<String, String> urls = new HashMap<>(); + urls.put("https://www.elastic.co", + "https%3A%2F%2Fwww.elastic.co"); + urls.put("<logstash-{now/d}>", + "%3Clogstash-%7Bnow%2Fd%7D%3E"); + urls.put("?query=(foo:A OR baz:B) AND title:/joh?n(ath[oa]n)/ AND date:{* TO 2012-01}", + "%3Fquery%3D%28foo%3AA+OR+baz%3AB%29+AND+title%3A%2Fjoh%3Fn%28ath%5Boa%5Dn%29%2F+AND+date%3A%7B*+TO+2012-01%7D"); + + for (Map.Entry<String, String> url : urls.entrySet()) { + assertScript("{{#url}}{{params}}{{/url}}", singletonMap("params", url.getKey()), equalTo(url.getValue())); + } + } + + public void testUrlEncoderWithParam() throws Exception { + assertScript("{{#url}}{{index}}{{/url}}", singletonMap("index", "<logstash-{now/d{YYYY.MM.dd|+12:00}}>"), + equalTo("%3Clogstash-%7Bnow%2Fd%7BYYYY.MM.dd%7C%2B12%3A00%7D%7D%3E")); + + final String random = randomAsciiOfLength(10); + assertScript("{{#url}}prefix_{{s}}{{/url}}", singletonMap("s", random), + equalTo("prefix_" + URLEncoder.encode(random, StandardCharsets.UTF_8.name()))); + } + + public void testUrlEncoderWithJoin() { + Map<String, Object> params = singletonMap("emails", Arrays.asList("john@smith.com", "john.smith@email.com", "jsmith@email.com")); + assertScript("?query={{#url}}{{#join}}emails{{/join}}{{/url}}", params, + equalTo("?query=john%40smith.com%2Cjohn.smith%40email.com%2Cjsmith%40email.com")); + + params = singletonMap("indices", new String[]{"<logstash-{now/d-2d}>", "<logstash-{now/d-1d}>", "<logstash-{now/d}>"}); + assertScript("{{#url}}https://localhost:9200/{{#join}}indices{{/join}}/_stats{{/url}}", params, + equalTo("https%3A%2F%2Flocalhost%3A9200%2F%3Clogstash-%7Bnow%2Fd-2d%7D" + "%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogstash-%7Bnow%2Fd%7D%3E%2F_stats")); + + params = singletonMap("fibonacci", new int[]{1, 1, 2, 3, 5, 8, 13, 21, 34, 55}); + assertScript("{{#url}}{{#join delimiter='+'}}fibonacci{{/join delimiter='+'}}{{/url}}", params, + equalTo("1%2B1%2B2%2B3%2B5%2B8%2B13%2B21%2B34%2B55")); + } + private void assertScript(String script, Map<String, Object> vars, Matcher<Object> matcher) { Object result = engine.executable(new CompiledScript(INLINE, "inline", "mustache", compile(script)), vars).run(); assertThat(result, notNullValue()); diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/25_custom_functions.yaml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/25_custom_functions.yaml new file mode 100644 index 00000000000..a4e1dde4632 --- /dev/null +++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/25_custom_functions.yaml @@ -0,0 +1,43 @@ +--- +"Rendering using {{url}} function": + + - do: + render_search_template: + body: > + { + "inline": { + "query": { + "match": { + "url": "https://localhost:9200/{{#url}}{{index}}{{/url}}/{{#url}}{{type}}{{/url}}/_search" + } + } + }, + "params": { + "index": "<logstash-{now/d-2d}>", + "type" : "métriques" + } + } + + - match: { template_output.query.match.url: "https://localhost:9200/%3Clogstash-%7Bnow%2Fd-2d%7D%3E/m%C3%A9triques/_search" } + +--- +"Rendering using {{url}} and {{join}} functions": + + - do: + render_search_template: + body: > + { + "inline": { + "query": { + "match": { + "url": "{{#url}}https://localhost:9200/{{#join}}indices{{/join}}/_stats{{/url}}" + } + } + }, + "params": { + "indices": ["<logstash-{now/d-2d}>", "<logstash-{now/d-1d}>", "<logstash-{now/d}>"] + } + } + + # Decoded URL is https://localhost:9200/<logstash-{now/d-2d}>,<logstash-{now/d-1d}>,<logstash-{now/d}>/_stats + - match: { template_output.query.match.url: "https%3A%2F%2Flocalhost%3A9200%2F%3Clogstash-%7Bnow%2Fd-2d%7D%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogstash-%7Bnow%2Fd%7D%3E%2F_stats" } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 32824e969d9..2625b66eb80 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -256,22 +256,21 @@ public abstract class AbstractAsyncBulkByScrollAction> { + interface RequestWrapper> { void setIndex(String index); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index edd7d78a28f..eb5fc599d54 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -28,6 +28,8 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocWriteResponse.Result; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -47,7 +49,6 @@ import org.elasticsearch.action.search.SearchResponse; import 
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java
index edd7d78a28f..eb5fc599d54 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java
@@ -28,6 +28,8 @@ import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.DocWriteResponse.Result;
+import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.bulk.BackoffPolicy;
 import org.elasticsearch.action.bulk.BulkItemResponse;
@@ -47,7 +49,6 @@ import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchScrollRequest;
 import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.action.support.PlainActionFuture;
-import org.elasticsearch.action.support.replication.ReplicationRequest;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.client.Client;
@@ -256,35 +257,36 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
         BulkItemResponse[] responses = new BulkItemResponse[randomIntBetween(0, 100)];
         for (int i = 0; i < responses.length; i++) {
             ShardId shardId = new ShardId(new Index("name", "uid"), 0);
-            String opType;
             if (rarely()) {
-                opType = randomSimpleString(random());
                 versionConflicts++;
-                responses[i] = new BulkItemResponse(i, opType, new Failure(shardId.getIndexName(), "type", "id" + i,
+                responses[i] = new BulkItemResponse(i, randomFrom(DocWriteRequest.OpType.values()),
+                    new Failure(shardId.getIndexName(), "type", "id" + i,
                         new VersionConflictEngineException(shardId, "type", "id", "test")));
                 continue;
             }
             boolean createdResponse;
+            DocWriteRequest.OpType opType;
             switch (randomIntBetween(0, 2)) {
             case 0:
-                opType = randomFrom("index", "create");
                 createdResponse = true;
+                opType = DocWriteRequest.OpType.CREATE;
                 created++;
                 break;
             case 1:
-                opType = randomFrom("index", "create");
                 createdResponse = false;
+                opType = randomFrom(DocWriteRequest.OpType.INDEX, DocWriteRequest.OpType.UPDATE);
                 updated++;
                 break;
             case 2:
-                opType = "delete";
                 createdResponse = false;
+                opType = DocWriteRequest.OpType.DELETE;
                 deleted++;
                 break;
             default:
                 throw new RuntimeException("Bad scenario");
             }
-            responses[i] = new BulkItemResponse(i, opType, new IndexResponse(shardId, "type", "id" + i, randomInt(), createdResponse));
+            responses[i] = new BulkItemResponse(i, opType,
+                new IndexResponse(shardId, "type", "id" + i, randomInt(), createdResponse));
         }
         new DummyAbstractAsyncBulkByScrollAction().onBulkResponse(timeValueNanos(System.nanoTime()), new BulkResponse(responses, 0));
         assertEquals(versionConflicts, testTask.getStatus().getVersionConflicts());
@@ -358,7 +360,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
     public void testBulkFailuresAbortRequest() throws Exception {
         Failure failure = new Failure("index", "type", "id", new RuntimeException("test"));
         DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction();
-        BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] {new BulkItemResponse(0, "index", failure)}, randomLong());
+        BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[]
+            {new BulkItemResponse(0, DocWriteRequest.OpType.CREATE, failure)}, randomLong());
         action.onBulkResponse(timeValueNanos(System.nanoTime()), bulkResponse);
         BulkIndexByScrollResponse response = listener.get();
         assertThat(response.getBulkFailures(), contains(failure));
@@ -764,33 +767,29 @@
             }
             BulkItemResponse[] responses = new BulkItemResponse[bulk.requests().size()];
             for (int i = 0; i < bulk.requests().size(); i++) {
-                ActionRequest item = bulk.requests().get(i);
-                String opType;
+                DocWriteRequest item = bulk.requests().get(i);
                 DocWriteResponse response;
-                ShardId shardId = new ShardId(new Index(((ReplicationRequest) item).index(), "uuid"), 0);
+                ShardId shardId = new ShardId(new Index(item.index(), "uuid"), 0);
                 if (item instanceof IndexRequest) {
                     IndexRequest index = (IndexRequest) item;
-                    opType = index.opType().lowercase();
                     response = new IndexResponse(shardId, index.type(), index.id(), randomIntBetween(0, Integer.MAX_VALUE), true);
                 } else if (item instanceof UpdateRequest) {
                     UpdateRequest update = (UpdateRequest) item;
-                    opType = "update";
                     response = new UpdateResponse(shardId, update.type(), update.id(),
-                            randomIntBetween(0, Integer.MAX_VALUE), DocWriteResponse.Result.CREATED);
+                            randomIntBetween(0, Integer.MAX_VALUE), Result.CREATED);
                 } else if (item instanceof DeleteRequest) {
                     DeleteRequest delete = (DeleteRequest) item;
-                    opType = "delete";
                     response = new DeleteResponse(shardId, delete.type(), delete.id(), randomIntBetween(0, Integer.MAX_VALUE), true);
                 } else {
                     throw new RuntimeException("Unknown request: " + item);
                 }
                 if (i == toReject) {
-                    responses[i] = new BulkItemResponse(i, opType,
+                    responses[i] = new BulkItemResponse(i, item.opType(),
                             new Failure(response.getIndex(), response.getType(), response.getId(), new EsRejectedExecutionException()));
                 } else {
-                    responses[i] = new BulkItemResponse(i, opType, response);
+                    responses[i] = new BulkItemResponse(i, item.opType(), response);
                 }
             }
             listener.onResponse((Response) new BulkResponse(responses, 1));
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java
index b81be4a1bb2..9bfa41da7f3 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java
@@ -28,7 +28,7 @@ import java.util.List;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
-import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE;
+import static org.elasticsearch.action.DocWriteRequest.OpType.CREATE;
 import static org.hamcrest.Matchers.both;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.either;
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java
index 2988fcb5ca6..1ab0613103f 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java
@@ -21,7 +21,7 @@ package org.elasticsearch.index.reindex;
 
 import org.elasticsearch.action.get.GetResponse;
 
-import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE;
+import static org.elasticsearch.action.DocWriteRequest.OpType.CREATE;
 import static org.elasticsearch.index.VersionType.EXTERNAL;
 import static org.elasticsearch.index.VersionType.INTERNAL;
diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java
index e961f497bcd..c19cbbb7c57 100644
--- a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java
+++ b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java
@@ -125,7 +125,7 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
         Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope, Property.Shared);
 
     public static final Setting<Integer> SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count",
-        (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2),
+        (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2),
         (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), Property.NodeScope, Property.Shared);
 
     public static final Setting<Boolean> SETTING_HTTP_TCP_NO_DELAY =
@@ -281,34 +281,42 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
 
     @Override
     protected void doStart() {
-        this.serverOpenChannels = new Netty3OpenChannelsHandler(logger);
-        if (blockingServer) {
-            serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory(
-                Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)),
-                Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))
-            ));
-        } else {
-            serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
-                Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)),
-                Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)),
-                workerCount));
-        }
-        serverBootstrap.setPipelineFactory(configureServerChannelPipelineFactory());
+        boolean success = false;
+        try {
+            this.serverOpenChannels = new Netty3OpenChannelsHandler(logger);
+            if (blockingServer) {
+                serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory(
+                    Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)),
+                    Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))
+                ));
+            } else {
+                serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
+                    Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)),
+                    Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)),
+                    workerCount));
+            }
+            serverBootstrap.setPipelineFactory(configureServerChannelPipelineFactory());
 
-        serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay);
-        serverBootstrap.setOption("child.keepAlive", tcpKeepAlive);
-        if (tcpSendBufferSize.getBytes() > 0) {
+            serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay);
+            serverBootstrap.setOption("child.keepAlive", tcpKeepAlive);
+            if (tcpSendBufferSize.getBytes() > 0) {
-            serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.getBytes());
+                serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.getBytes());
+            }
+            if (tcpReceiveBufferSize.getBytes() > 0) {
+                serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.getBytes());
+            }
+            serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
+            serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
+            serverBootstrap.setOption("reuseAddress", reuseAddress);
+            serverBootstrap.setOption("child.reuseAddress", reuseAddress);
+            this.boundAddress = createBoundHttpAddress();
+            success = true;
+        } finally {
+            if (success == false) {
+                doStop(); // otherwise we leak threads since we never moved to started
+            }
         }
-        if (tcpReceiveBufferSize.getBytes() > 0) {
-            serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.getBytes());
-        }
-        serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
-        serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
-        serverBootstrap.setOption("reuseAddress", reuseAddress);
-        serverBootstrap.setOption("child.reuseAddress", reuseAddress);
-        this.boundAddress = createBoundHttpAddress();
     }
 
     private BoundTransportAddress createBoundHttpAddress() {
@@ -402,24 +410,21 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem
     private TransportAddress bindAddress(final InetAddress hostAddress) {
         final AtomicReference<Exception> lastException = new AtomicReference<>();
         final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
-        boolean success = port.iterate(new PortsRange.PortCallback() {
-            @Override
-            public boolean onPortNumber(int portNumber) {
-                try {
-                    synchronized (serverChannels) {
-                        Channel channel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
-                        serverChannels.add(channel);
-                        boundSocket.set((InetSocketAddress) channel.getLocalAddress());
-                    }
-                } catch (Exception e) {
-                    lastException.set(e);
-                    return false;
+        boolean success = port.iterate(portNumber -> {
+            try {
+                synchronized (serverChannels) {
+                    Channel channel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
+                    serverChannels.add(channel);
+                    boundSocket.set((InetSocketAddress) channel.getLocalAddress());
                 }
-                return true;
+            } catch (Exception e) {
+                lastException.set(e);
+                return false;
             }
+            return true;
         });
         if (!success) {
-            throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get());
+            throw new BindHttpException("Failed to bind to [" + port.getPortRangeString() + "]", lastException.get());
         }
 
         if (logger.isDebugEnabled()) {
diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java
index eb8d14b08fc..9d71fec9c90 100644
--- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java
+++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java
@@ -92,7 +92,7 @@ public class Netty3Transport extends TcpTransport {
 
     public static final Setting<Integer> WORKER_COUNT =
         new Setting<>("transport.netty.worker_count",
-            (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2),
+            (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2),
             (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope, Property.Shared);
 
     public static final Setting<ByteSizeValue> NETTY_MAX_CUMULATION_BUFFER_CAPACITY =
diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java
index d291f76ff38..6ab4dbd709f 100644
--- a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java
+++ b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java
@@ -22,8 +22,10 @@ package org.elasticsearch.http.netty3;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.util.MockBigArrays;
 import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.http.BindHttpException;
 import org.elasticsearch.http.netty3.cors.Netty3CorsConfig;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.test.ESTestCase;
@@ -103,4 +105,17 @@ public class Netty3HttpServerTransportTests extends ESTestCase {
         assertThat(corsConfig.allowedRequestMethods().stream().map(HttpMethod::getName).collect(Collectors.toSet()), equalTo(methods));
         transport.close();
     }
+
+    public void testBindUnavailableAddress() {
+        try (Netty3HttpServerTransport transport = new Netty3HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool)) {
+            transport.start();
+            TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+            Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build();
+            try (Netty3HttpServerTransport otherTransport = new Netty3HttpServerTransport(settings, networkService, bigArrays,
+                    threadPool)) {
+                BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start());
+                assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage());
+            }
+        }
+    }
 }
diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java
index e57d36cbc58..b7f20df75a5 100644
--- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java
+++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java
@@ -28,9 +28,11 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.node.Node;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.AbstractSimpleTransportTestCase;
+import org.elasticsearch.transport.BindTransportException;
 import org.elasticsearch.transport.ConnectTransportException;
 import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
@@ -78,4 +80,26 @@ public class SimpleNetty3TransportTests extends AbstractSimpleTransportTestCase
             assertThat(e.getMessage(), containsString("[127.0.0.1:9876]"));
         }
     }
+
+    public void testBindUnavailableAddress() {
+        // this is on a lower level since it needs access to the TransportService before it's started
+        int port = serviceA.boundAddress().publishAddress().getPort();
+        Settings settings = Settings.builder()
+            .put(Node.NODE_NAME_SETTING.getKey(), "foobar")
+            .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "")
+            .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
+            .put("transport.tcp.port", port)
+            .build();
+        ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> {
+            MockTransportService transportService = nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings);
+            try {
+                transportService.start();
+            } finally {
+                transportService.stop();
+                transportService.close();
+            }
+        });
+        assertEquals("Failed to bind to [" + port + "]", bindTransportException.getMessage());
+    }
 }
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java
index 98d4eeca17b..20cdfe0a128 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java
@@ -127,7 +127,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
         Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope, Property.Shared);
 
     public static final Setting<Integer> SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count",
-        (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2),
+        (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2),
         (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), Property.NodeScope, Property.Shared);
 
     public static final Setting<Boolean> SETTING_HTTP_TCP_NO_DELAY =
@@ -285,40 +285,50 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
 
     @Override
     protected void doStart() {
-        this.serverOpenChannels = new Netty4OpenChannelsHandler(logger);
+        boolean success = false;
+        try {
+            this.serverOpenChannels = new Netty4OpenChannelsHandler(logger);
 
-        serverBootstrap = new ServerBootstrap();
-        if (blockingServer) {
-            serverBootstrap.group(new OioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
-            serverBootstrap.channel(OioServerSocketChannel.class);
-        } else {
-            serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
-            serverBootstrap.channel(NioServerSocketChannel.class);
+            serverBootstrap = new ServerBootstrap();
+            if (blockingServer) {
+                serverBootstrap.group(new OioEventLoopGroup(workerCount, daemonThreadFactory(settings,
+                    HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
+                serverBootstrap.channel(OioServerSocketChannel.class);
+            } else {
+                serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings,
+                    HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
+                serverBootstrap.channel(NioServerSocketChannel.class);
+            }
+
+            serverBootstrap.childHandler(configureServerChannelHandler());
+
+            serverBootstrap.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings));
+            serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings));
+
+            final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings);
+            if (tcpSendBufferSize.getBytes() > 0) {
+                serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
+            }
+
+            final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings);
+            if (tcpReceiveBufferSize.getBytes() > 0) {
+                serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes()));
+            }
+
+            serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
+            serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
+
+            final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings);
+            serverBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
+            serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress);
+
+            this.boundAddress = createBoundHttpAddress();
+            success = true;
+        } finally {
+            if (success == false) {
+                doStop(); // otherwise we leak threads since we never moved to started
+            }
         }
-
-        serverBootstrap.childHandler(configureServerChannelHandler());
-
-        serverBootstrap.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings));
-        serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings));
-
-        final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings);
-        if (tcpSendBufferSize.getBytes() > 0) {
-            serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
-        }
-
-        final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings);
-        if (tcpReceiveBufferSize.getBytes() > 0) {
-            serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes()));
-        }
-
-        serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
-        serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
-
-        final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings);
-        serverBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
-        serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress);
-
-        this.boundAddress = createBoundHttpAddress();
     }
 
     private BoundTransportAddress createBoundHttpAddress() {
@@ -417,24 +427,21 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
     private TransportAddress bindAddress(final InetAddress hostAddress) {
         final AtomicReference<Exception> lastException = new AtomicReference<>();
         final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
-        boolean success = port.iterate(new PortsRange.PortCallback() {
-            @Override
-            public boolean onPortNumber(int portNumber) {
-                try {
-                    synchronized (serverChannels) {
-                        ChannelFuture future = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)).sync();
-                        serverChannels.add(future.channel());
-                        boundSocket.set((InetSocketAddress) future.channel().localAddress());
-                    }
-                } catch (Exception e) {
-                    lastException.set(e);
-                    return false;
+        boolean success = port.iterate(portNumber -> {
+            try {
+                synchronized (serverChannels) {
+                    ChannelFuture future = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)).sync();
+                    serverChannels.add(future.channel());
+                    boundSocket.set((InetSocketAddress) future.channel().localAddress());
                 }
-                return true;
+            } catch (Exception e) {
+                lastException.set(e);
+                return false;
             }
+            return true;
         });
         if (!success) {
-            throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get());
+            throw new BindHttpException("Failed to bind to [" + port.getPortRangeString() + "]", lastException.get());
        }
 
         if (logger.isDebugEnabled()) {
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
index 3787b29ab63..77429788317 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java
@@ -95,7 +95,7 @@ public class Netty4Transport extends TcpTransport {
 
     public static final Setting<Integer> WORKER_COUNT =
         new Setting<>("transport.netty.worker_count",
-            (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2),
+            (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2),
             (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope, Property.Shared);
 
     public static final Setting<ByteSizeValue> NETTY_MAX_CUMULATION_BUFFER_CAPACITY =
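The lambda passed to `port.iterate(...)` in both `bindAddress` rewrites replaces an anonymous `PortsRange.PortCallback`. A self-contained sketch of why that substitution works, assuming the callback interface is shaped like the one the diff removes; `PortsRangeSketch` here is illustrative, not the real `PortsRange`:

```java
// A single-abstract-method interface can be implemented inline with a lambda,
// which is exactly what `portNumber -> { ... }` does above.
interface PortCallback {
    boolean onPortNumber(int portNumber);
}

final class PortsRangeSketch {
    // Mirrors the iterate contract used in the diff: try each port in order and
    // stop at the first one the callback accepts; report overall success.
    static boolean iterate(int from, int to, PortCallback callback) {
        for (int port = from; port <= to; port++) {
            if (callback.onPortNumber(port)) {
                return true;   // first successful bind wins
            }
        }
        return false;          // caller then reports the whole range in the exception message
    }

    public static void main(String[] args) {
        // Stand-in predicate instead of a real socket bind.
        boolean bound = iterate(9200, 9299, portNumber -> portNumber % 2 == 0);
        System.out.println(bound);
    }
}
```

The companion fix in the `BindHttpException` message uses `port.getPortRangeString()` so that the error reports the configured range rather than the object's default `toString()`.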
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java
index ceed9d9a503..498daf63226 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java
@@ -34,6 +34,7 @@ import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.util.MockBigArrays;
+import org.elasticsearch.http.BindHttpException;
 import org.elasticsearch.http.netty4.cors.Netty4CorsConfig;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.rest.BytesRestResponse;
@@ -123,7 +124,7 @@ public class Netty4HttpServerTransportTests extends ESTestCase {
         transport.httpServerAdapter((request, channel, context) ->
             channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))));
         transport.start();
-        TransportAddress remoteAddress = (TransportAddress) randomFrom(transport.boundAddress().boundAddresses());
+        TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
 
         try (Netty4HttpClient client = new Netty4HttpClient()) {
             FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
@@ -140,4 +141,17 @@ public class Netty4HttpServerTransportTests extends ESTestCase {
             }
         }
     }
+
+    public void testBindUnavailableAddress() {
+        try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool)) {
+            transport.start();
+            TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+            Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build();
+            try (Netty4HttpServerTransport otherTransport = new Netty4HttpServerTransport(settings, networkService, bigArrays,
+                    threadPool)) {
+                BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start());
+                assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage());
+            }
+        }
+    }
 }
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java
index 93468d8f2ea..a7a674007ba 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java
@@ -28,9 +28,11 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.node.Node;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.AbstractSimpleTransportTestCase;
+import org.elasticsearch.transport.BindTransportException;
 import org.elasticsearch.transport.ConnectTransportException;
 import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
@@ -79,4 +81,26 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
         }
     }
 
+    public void testBindUnavailableAddress() {
+        // this is on a lower level since it needs access to the TransportService before it's started
+        int port = serviceA.boundAddress().publishAddress().getPort();
+        Settings settings = Settings.builder()
+            .put(Node.NODE_NAME_SETTING.getKey(), "foobar")
+            .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "")
+            .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
+            .put("transport.tcp.port", port)
+            .build();
+        ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> {
+            MockTransportService transportService = nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings);
+            try {
+                transportService.start();
+            } finally {
+                transportService.stop();
+                transportService.close();
+            }
+        });
+        assertEquals("Failed to bind to [" + port + "]", bindTransportException.getMessage());
+    }
+
 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
index a734f7b1bac..f97492aa7ab 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
@@ -36,18 +36,6 @@
         "type" : "string",
         "description" : "The field to use as default where no field prefix is given in the query string"
       },
-      "explain": {
-        "type" : "boolean",
-        "description" : "Specify whether to return detailed information about score computation as part of a hit"
-      },
-      "stored_fields": {
-        "type" : "list",
-        "description" : "A comma-separated list of stored fields to return as part of a hit"
-      },
-      "docvalue_fields": {
-        "type" : "list",
-        "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit"
-      },
       "from": {
         "type" : "number",
         "description" : "Starting offset (default: 0)"
@@ -94,7 +82,7 @@
         "description" : "A comma-separated list of specific routing values"
       },
       "scroll": {
-        "type" : "duration",
+        "type" : "time",
         "description" : "Specify how long a consistent view of the index should be maintained for scrolled search"
       },
       "search_type": {
@@ -134,32 +122,6 @@
         "type" : "list",
         "description" : "Specific 'tag' of the request for logging and statistical purposes"
       },
-      "suggest_field": {
-        "type" : "string",
-        "description" : "Specify which field to use for suggestions"
-      },
-      "suggest_mode": {
-        "type" : "enum",
-        "options" : ["missing", "popular", "always"],
-        "default" : "missing",
-        "description" : "Specify suggest mode"
-      },
-      "suggest_size": {
-        "type" : "number",
-        "description" : "How many suggestions to return in response"
-      },
-      "suggest_text": {
-        "type" : "text",
-        "description" : "The source text for which the suggestions should be returned"
-      },
-      "timeout": {
-        "type" : "time",
-        "description" : "Explicit operation timeout"
-      },
-      "track_scores": {
-        "type" : "boolean",
-        "description": "Whether to calculate and return scores even if they are not used for sorting"
-      },
       "version": {
         "type" : "boolean",
         "description" : "Specify whether to return document version as part of a hit"
@@ -182,7 +144,7 @@
         "description" : "Sets the number of shard copies that must be active before proceeding with the delete by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)"
       },
       "scroll_size": {
-        "type": "integer",
+        "type": "number",
         "defaut_value": 100,
         "description": "Size on the scroll request powering the update_by_query"
       },
@@ -192,9 +154,9 @@
       "wait_for_completion": {
         "type" : "boolean",
         "default": false,
         "description" : "Should the request should block until the delete-by-query is complete."
       },
       "requests_per_second": {
-        "type": "float",
+        "type": "number",
         "default": 0,
-        "description": "The throttle for this request in sub-requests per second. -1 means set no throttle."
+        "description": "The throttle to set on this request in sub-requests per second. -1 means set no throttle as does \"unlimited\" which is the only non-float this accepts."
       }
     }
   },
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json
index 677219addee..814a53c1141 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json
@@ -54,7 +54,7 @@
         "description" : "Explicit timestamp for the document"
       },
       "ttl": {
-        "type" : "duration",
+        "type" : "time",
         "description" : "Expiration time for the document"
       },
       "version" : {
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json
index 5fb4fe58db3..79f3b883767 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json
@@ -26,7 +26,7 @@
         "description" : "Should the request should block until the reindex is complete."
       },
       "requests_per_second": {
-        "type": "float",
+        "type": "number",
         "default": 0,
         "description": "The throttle to set on this request in sub-requests per second. -1 means set no throttle as does \"unlimited\" which is the only non-float this accepts."
       }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json
index 5be7ea27407..4bba41d37d5 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json
@@ -13,7 +13,7 @@
   },
   "params": {
     "requests_per_second": {
-      "type": "float",
+      "type": "number",
       "required": true,
       "description": "The throttle to set on this request in floating sub-requests per second. -1 means set no throttle."
     }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json
index 885b746d095..699ddcc9e00 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json
@@ -13,7 +13,7 @@
   },
   "params": {
     "scroll": {
-      "type" : "duration",
+      "type" : "time",
       "description" : "Specify how long a consistent view of the index should be maintained for scrolled search"
     },
     "scroll_id": {
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json
index 21fda8dc805..5aa7a409a06 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json
@@ -89,7 +89,7 @@
         "description" : "A comma-separated list of specific routing values"
       },
       "scroll": {
-        "type" : "duration",
+        "type" : "time",
         "description" : "Specify how long a consistent view of the index should be maintained for scrolled search"
       },
       "search_type": {
@@ -140,7 +140,7 @@
         "description" : "How many suggestions to return in response"
       },
       "suggest_text": {
-        "type" : "text",
+        "type" : "string",
         "description" : "The source text for which the suggestions should be returned"
       },
       "timeout": {
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json
index ff1d35bb417..b9339b55332 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json
@@ -39,7 +39,7 @@
         "description" : "A comma-separated list of specific routing values"
       },
       "scroll": {
-        "type" : "duration",
+        "type" : "time",
         "description" : "Specify how long a consistent view of the index should be maintained for scrolled search"
       },
       "search_type": {
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json
index d87e4c5e7f5..7e7fffcee07 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json
@@ -73,7 +73,7 @@
         "description": "Explicit timestamp for the document"
       },
       "ttl": {
-        "type": "duration",
+        "type": "time",
         "description": "Expiration time for the document"
       },
       "version": {
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json
index b7f608b8b4f..4b9e76ac59f 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json
@@ -36,22 +36,6 @@
         "type" : "string",
         "description" : "The field to use as default where no field prefix is given in the query string"
       },
-      "explain": {
-        "type" : "boolean",
-        "description" : "Specify whether to return detailed information about score computation as part of a hit"
-      },
-      "stored_fields": {
-        "type" : "list",
-        "description" : "A comma-separated list of stored fields to return as part of a hit"
-      },
-      "docvalue_fields": {
-        "type" : "list",
-        "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit"
-      },
-      "fielddata_fields": {
-        "type" : "list",
-        "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit"
-      },
       "from": {
         "type" : "number",
         "description" : "Starting offset (default: 0)"
@@ -69,7 +53,7 @@
         "type" : "enum",
         "options": ["abort", "proceed"],
         "default": "abort",
-        "description" : "What to do when the reindex hits version conflicts?"
+        "description" : "What to do when the update by query hits version conflicts?"
       },
       "expand_wildcards": {
         "type" : "enum",
@@ -102,7 +86,7 @@
         "description" : "A comma-separated list of specific routing values"
       },
       "scroll": {
-        "type" : "duration",
+        "type" : "time",
         "description" : "Specify how long a consistent view of the index should be maintained for scrolled search"
       },
       "search_type": {
@@ -142,32 +126,6 @@
         "type" : "list",
         "description" : "Specific 'tag' of the request for logging and statistical purposes"
       },
-      "suggest_field": {
-        "type" : "string",
-        "description" : "Specify which field to use for suggestions"
-      },
-      "suggest_mode": {
-        "type" : "enum",
-        "options" : ["missing", "popular", "always"],
-        "default" : "missing",
-        "description" : "Specify suggest mode"
-      },
-      "suggest_size": {
-        "type" : "number",
-        "description" : "How many suggestions to return in response"
-      },
-      "suggest_text": {
-        "type" : "text",
-        "description" : "The source text for which the suggestions should be returned"
-      },
-      "timeout": {
-        "type" : "time",
-        "description" : "Explicit operation timeout"
-      },
-      "track_scores": {
-        "type" : "boolean",
-        "description": "Whether to calculate and return scores even if they are not used for sorting"
-      },
       "version": {
         "type" : "boolean",
         "description" : "Specify whether to return document version as part of a hit"
@@ -194,17 +152,17 @@
         "description" : "Sets the number of shard copies that must be active before proceeding with the update by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)"
       },
       "scroll_size": {
-        "type": "integer",
+        "type": "number",
         "defaut_value": 100,
         "description": "Size on the scroll request powering the update_by_query"
       },
       "wait_for_completion": {
         "type" : "boolean",
         "default": false,
-        "description" : "Should the request should block until the reindex is complete."
+        "description" : "Should the request block until the update by query operation is complete."
       },
       "requests_per_second": {
-        "type": "float",
+        "type": "number",
        "default": 0,
         "description": "The throttle to set on this request in sub-requests per second. -1 means set no throttle as does \"unlimited\" which is the only non-float this accepts."
       }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml
index 1923377ba83..4ea921a3fa0 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml
@@ -4,7 +4,9 @@ setup:
         index: test_1
         body:
           aliases:
-            alias_1: {}
+            alias_1: {
+              "filter" : { "term" : { "foo" : "bar"} }
+            }
   - do:
       index:
         index: test_1
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml
index 6c93dabeec7..bc2dace0e18 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml
@@ -22,8 +22,7 @@ Test reset index settings:
   - do:
       indices.get_settings:
         flat_settings: false
-  - is_false:
-      test-index.settings.index\.refresh_interval
+  - is_false: test-index.settings.index\.refresh_interval
   - do:
       indices.get_settings:
         include_defaults: true
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml
index 14e258a6bb4..66da068895f 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml
@@ -31,4 +31,5 @@
   - do:
       indices.get_mapping:
         index: test
-  - is_true: test_2 # the name of the index that the alias points to, would be `test` if the index were still there
+  # the name of the index that the alias points to, would be `test` if the index were still there
+  - is_true: test_2
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml
index a1f9aa87636..637ebd4253e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml
@@ -5,12 +5,24 @@ setup:
       body:
         settings:
           number_of_replicas: 0
+        aliases:
+          alias_1: {
+            "filter" : { "match_all" : {} }
+          }
 
 ---
 "Validate query api":
   - do:
       indices.validate_query:
         q: query string
+        index: testing
+
+  - is_true: valid
+
+  - do:
+      indices.validate_query:
+        q: query string
+        index: alias_1
 
   - is_true: valid
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml
index 5443059135a..1695bdb2352 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml
@@ -21,10 +21,9 @@
         scroll: 1m
         sort: foo
         body:
-          slice: {
-            id: 0,
+          slice:
+            id: 0
             max: 3
-          }
           query:
             match_all: {}
 
@@ -41,10 +40,9 @@
         size: 1
         scroll: 1m
         body:
-          slice: {
-            id: 0,
+          slice:
+            id: 0
             max: 1025
-          }
           query:
             match_all: {}
 
@@ -60,10 +58,9 @@
         size: 1
         scroll: 1m
         body:
-          slice: {
-            id: 0,
+          slice:
+            id: 0
             max: 1025
-          }
           query:
             match_all: {}
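The `slice` blocks reformatted above drive sliced scrolls, where each client works through an independent `id`-of-`max` partition of the hits. For reference, the same request from the 5.x Java API might look like the sketch below; treat `SliceBuilder` and `SearchRequestBuilder.slice(...)` signatures as an assumption rather than a verified contract:

```java
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.slice.SliceBuilder;

final class SlicedScrollSketch {
    // One slice of a scroll, mirroring `slice: { id: 0, max: 3 }` in the YAML above.
    static SearchRequestBuilder sliceZeroOfThree(Client client) {
        return client.prepareSearch("test")
            .setQuery(QueryBuilders.matchAllQuery())
            .slice(new SliceBuilder(0, 3))              // id must be strictly less than max
            .setScroll(TimeValue.timeValueMinutes(1))
            .setSize(1);
    }
}
```

The second and third YAML requests use `max: 1025` precisely because it exceeds the default cap, so the test expects a failure rather than results.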
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml
index c35e79e6cfe..029b44544fd 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml
@@ -47,30 +47,30 @@ setup:
           type: test
           id: 3
           body: { "str": "bcd" }
-  
+
   - do:
       indices.refresh: {}
-  
+
   - do:
       search:
         body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str" } } } }
 
   - match: { hits.total: 3 }
-  
+
   - length: { aggregations.str_terms.buckets: 2 }
-  
+
   - match: { aggregations.str_terms.buckets.0.key: "abc" }
-  
+
   - is_false: aggregations.str_terms.buckets.0.key_as_string
-  
+
   - match: { aggregations.str_terms.buckets.0.doc_count: 2 }
-  
+
   - match: { aggregations.str_terms.buckets.1.key: "bcd" }
-  
+
   - is_false: aggregations.str_terms.buckets.1.key_as_string
-  
+
   - match: { aggregations.str_terms.buckets.1.doc_count: 1 }
-  
+
 ---
 "IP test":
   - do:
@@ -112,9 +112,9 @@ setup:
   - match: { aggregations.ip_terms.buckets.0.doc_count: 2 }
 
   - match: { aggregations.ip_terms.buckets.1.key: "127.0.0.1" }
-  
+
   - is_false: aggregations.ip_terms.buckets.1.key_as_string
-  
+
   - match: { aggregations.ip_terms.buckets.1.doc_count: 1 }
 
   - do:
@@ -142,7 +142,7 @@ setup:
       search:
         body: { "size" : 0, "aggs" : { "ip_terms" : { "terms" : { "field" : "ip", "exclude" : "127.*" } } } }
-  
+
 
 ---
 "Boolean test":
@@ -327,7 +327,7 @@ setup:
   - match: { aggregations.date_terms.buckets.1.key_as_string: "2014-09-01T00:00:00.000Z" }
 
   - match: { aggregations.date_terms.buckets.1.doc_count: 1 }
-  
+
   - do:
       search:
         body: { "size" : 0, "aggs" : { "date_terms" : { "terms" : { "field" : "date", "include" : [ "2016-05-03" ] } } } }
@@ -335,11 +335,11 @@ setup:
   - match: { hits.total: 3 }
 
   - length: { aggregations.date_terms.buckets: 1 }
-  
+
   - match: { aggregations.date_terms.buckets.0.key_as_string: "2016-05-03T00:00:00.000Z" }
-
-  - match: { aggregations.date_terms.buckets.0.doc_count: 2 } 
-  
+
+  - match: { aggregations.date_terms.buckets.0.doc_count: 2 }
+
   - do:
       search:
         body: { "size" : 0, "aggs" : { "date_terms" : { "terms" : { "field" : "date", "exclude" : [ "2016-05-03" ] } } } }
@@ -347,7 +347,7 @@ setup:
   - match: { hits.total: 3 }
 
   - length: { aggregations.date_terms.buckets: 1 }
-  
+
   - match: { aggregations.date_terms.buckets.0.key_as_string: "2014-09-01T00:00:00.000Z" }
 
-  - match: { aggregations.date_terms.buckets.0.doc_count: 1 } 
+  - match: { aggregations.date_terms.buckets.0.doc_count: 1 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml
index 98e61dd9fa9..100b44dcb04 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml
@@ -5,19 +5,14 @@ setup:
       index: test
       body:
         mappings:
-          type_1: {
-            properties: {
-              nested_field : {
+          type_1:
+            properties:
+              nested_field:
                 type: nested
-              }
-            }
-          }
           type_2: {}
-          type_3: {
-            _parent: {
+          type_3:
+            _parent:
               type: type_2
-            }
-          }
 
 ---
 "Nested inner hits":
@@ -26,13 +21,8 @@ setup:
       index: test
       type: type_1
       id: 1
-      body: {
-        "nested_field" : [
-          {
-            "foo": "bar"
-          }
-        ]
-      }
+      body:
+        "nested_field" : [ { "foo": "bar" } ]
 
   - do:
       indices.refresh: {}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml
index da7af85cf9f..d47b52ce02c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml
@@ -15,28 +15,33 @@ setup:
               "suggest_context":
                 "type" : "completion"
                 "contexts":
-                  - "name" : "color"
+                  -
+                    "name" : "color"
                     "type" : "category"
               "suggest_context_with_path":
                 "type" : "completion"
                 "contexts":
-                  - "name" : "color"
+                  -
+                    "name" : "color"
                     "type" : "category"
                     "path" : "color"
               "suggest_geo":
                 "type" : "completion"
                 "contexts":
-                  - "name" : "location"
+                  -
+                    "name" : "location"
                     "type" : "geo"
                     "precision" : "5km"
               "suggest_multi_contexts":
                 "type" : "completion"
                 "contexts":
-                  - "name" : "location"
+                  -
+                    "name" : "location"
                     "type" : "geo"
                     "precision" : "5km"
                     "path" : "location"
-                  - "name" : "color"
+                  -
+                    "name" : "color"
                     "type" : "category"
                     "path" : "color"