From 80ca78479f5046f58374ed831f6b2c3d51f530f9 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Mon, 22 Aug 2016 13:08:49 -0400 Subject: [PATCH 01/53] Make bulk item-level requests implement DocumentRequest interface Currently, bulk item requests can be any ActionRequest, this commit restricts bulk item requests to DocumentRequest. This simplifies handling failures during bulk requests. Additionally, a new enum is added to DocumentRequest to represent the intended operation to be performed by a document request. Now, index operation type also uses the new enum to specify whether the request should create or index a document. --- .../elasticsearch/action/DocumentRequest.java | 82 +++++++- .../action/bulk/BulkItemRequest.java | 32 +-- .../action/bulk/BulkItemResponse.java | 17 +- .../action/bulk/BulkProcessor.java | 12 +- .../action/bulk/BulkRequest.java | 37 ++-- .../action/bulk/TransportBulkAction.java | 182 +++++------------- .../action/bulk/TransportShardBulkAction.java | 25 ++- .../action/delete/DeleteRequest.java | 13 +- .../action/index/IndexRequest.java | 93 ++------- .../action/index/IndexRequestBuilder.java | 7 +- .../action/ingest/IngestActionFilter.java | 9 +- .../termvectors/TermVectorsRequest.java | 8 +- .../action/update/UpdateRequest.java | 16 +- .../ingest/PipelineExecutionService.java | 6 +- .../rest/action/document/RestIndexAction.java | 2 +- .../action/bulk/BulkRequestTests.java | 4 +- .../action/bulk/BulkWithUpdatesIT.java | 35 ++-- .../elasticsearch/action/bulk/RetryTests.java | 9 +- .../action/index/IndexRequestTests.java | 19 +- .../ingest/BulkRequestModifierTests.java | 6 +- .../ingest/IngestActionFilterTests.java | 6 +- .../document/DocumentActionsIT.java | 11 +- .../ingest/PipelineExecutionServiceTests.java | 4 +- .../routing/SimpleRoutingIT.java | 8 +- .../versioning/SimpleVersioningIT.java | 5 +- .../AbstractAsyncBulkByScrollAction.java | 27 ++- .../AbstractAsyncBulkIndexByScrollAction.java | 6 +- 
.../reindex/AsyncBulkByScrollActionTests.java | 34 ++-- .../index/reindex/ReindexFailureTests.java | 2 +- .../index/reindex/ReindexVersioningTests.java | 2 +- 30 files changed, 339 insertions(+), 380 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java index a90f013a6b9..50af0dc780d 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java @@ -19,11 +19,13 @@ package org.elasticsearch.action; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.index.VersionType; + +import java.util.Locale; /** - * Generic interface to group ActionRequest, which work on single document level - * - * Forces this class return index/type/id getters + * Generic interface to group ActionRequest, which perform writes to a single document + * Action requests implementing this can be part of {@link org.elasticsearch.action.bulk.BulkRequest} */ public interface DocumentRequest extends IndicesRequest { @@ -70,4 +72,78 @@ public interface DocumentRequest extends IndicesRequest { */ String parent(); + /** + * Get the document version for this request + * @return the document version + */ + long version(); + + /** + * Sets the version, which will perform the operation only if a matching + * version exists and no changes happened on the doc since then. + */ + T version(long version); + + /** + * Get the document version type for this request + * @return the document version type + */ + VersionType versionType(); + + /** + * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}. 
+ */ + T versionType(VersionType versionType); + + /** + * Get the requested document operation type of the request + * @return the operation type {@link OpType} + */ + OpType opType(); + + /** + * Requested operation type to perform on the document + */ + enum OpType { + /** + * Creates the resource. Simply adds it to the index, if there is an existing + * document with the id, then it won't be removed. + */ + CREATE(0), + /** + * Index the source. If there an existing document with the id, it will + * be replaced. + */ + INDEX(1), + /** Updates a document */ + UPDATE(2), + /** Deletes a document */ + DELETE(3); + + private final byte op; + private final String lowercase; + + OpType(int op) { + this.op = (byte) op; + this.lowercase = this.toString().toLowerCase(Locale.ENGLISH); + } + + public byte getId() { + return op; + } + + public String getLowercase() { + return lowercase; + } + + public static OpType fromId(byte id) { + switch (id) { + case 0: return CREATE; + case 1: return INDEX; + case 2: return UPDATE; + case 3: return DELETE; + default: throw new IllegalArgumentException("Unknown opType: [" + id + "]"); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 760c5781aea..79503fcf9ee 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -19,8 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; @@ -36,7 +35,7 @@ import java.io.IOException; public class BulkItemRequest implements Streamable { private int id; - private ActionRequest 
request; + private DocumentRequest request; private volatile BulkItemResponse primaryResponse; private volatile boolean ignoreOnReplica; @@ -44,8 +43,7 @@ public class BulkItemRequest implements Streamable { } - public BulkItemRequest(int id, ActionRequest request) { - assert request instanceof IndicesRequest; + public BulkItemRequest(int id, DocumentRequest request) { this.id = id; this.request = request; } @@ -54,14 +52,13 @@ public class BulkItemRequest implements Streamable { return id; } - public ActionRequest request() { + public DocumentRequest request() { return request; } public String index() { - IndicesRequest indicesRequest = (IndicesRequest) request; - assert indicesRequest.indices().length == 1; - return indicesRequest.indices()[0]; + assert request.indices().length == 1; + return request.indices()[0]; } BulkItemResponse getPrimaryResponse() { @@ -94,13 +91,18 @@ public class BulkItemRequest implements Streamable { id = in.readVInt(); byte type = in.readByte(); if (type == 0) { - request = new IndexRequest(); + IndexRequest indexRequest = new IndexRequest(); + indexRequest.readFrom(in); + request = indexRequest; } else if (type == 1) { - request = new DeleteRequest(); + DeleteRequest deleteRequest = new DeleteRequest(); + deleteRequest.readFrom(in); + request = deleteRequest; } else if (type == 2) { - request = new UpdateRequest(); + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.readFrom(in); + request = updateRequest; } - request.readFrom(in); if (in.readBoolean()) { primaryResponse = BulkItemResponse.readBulkItem(in); } @@ -112,12 +114,14 @@ public class BulkItemRequest implements Streamable { out.writeVInt(id); if (request instanceof IndexRequest) { out.writeByte((byte) 0); + ((IndexRequest) request).writeTo(out); } else if (request instanceof DeleteRequest) { out.writeByte((byte) 1); + ((DeleteRequest) request).writeTo(out); } else if (request instanceof UpdateRequest) { out.writeByte((byte) 2); + ((UpdateRequest) 
request).writeTo(out); } - request.writeTo(out); out.writeOptionalStreamable(primaryResponse); out.writeBoolean(ignoreOnReplica); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index ad45ace84c9..adeda64ee56 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocumentRequest.OpType; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateResponse; @@ -50,7 +51,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(opType); + builder.startObject(opType.getLowercase()); if (failure == null) { response.toXContent(builder, params); builder.field(Fields.STATUS, response.status().getStatus()); @@ -183,7 +184,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { private int id; - private String opType; + private OpType opType; private DocWriteResponse response; @@ -193,13 +194,13 @@ public class BulkItemResponse implements Streamable, StatusToXContent { } - public BulkItemResponse(int id, String opType, DocWriteResponse response) { + public BulkItemResponse(int id, OpType opType, DocWriteResponse response) { this.id = id; - this.opType = opType; this.response = response; + this.opType = opType; } - public BulkItemResponse(int id, String opType, Failure failure) { + public BulkItemResponse(int id, OpType opType, Failure failure) { this.id = id; this.opType = 
opType; this.failure = failure; @@ -215,7 +216,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { /** * The operation type ("index", "create" or "delete"). */ - public String getOpType() { + public OpType getOpType() { return this.opType; } @@ -300,7 +301,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - opType = in.readString(); + opType = OpType.fromId(in.readByte()); byte type = in.readByte(); if (type == 0) { @@ -322,7 +323,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - out.writeString(opType); + out.writeByte(opType.getId()); if (response == null) { out.writeByte((byte) 2); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index c54b3588c17..4881a9444bb 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Client; @@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable { * (for example, if no id is provided, one will be generated, or usage of the create flag). */ public BulkProcessor add(IndexRequest request) { - return add((ActionRequest) request); + return add((DocumentRequest) request); } /** * Adds an {@link DeleteRequest} to the list of actions to execute. 
*/ public BulkProcessor add(DeleteRequest request) { - return add((ActionRequest) request); + return add((DocumentRequest) request); } /** * Adds either a delete or an index request. */ - public BulkProcessor add(ActionRequest request) { + public BulkProcessor add(DocumentRequest request) { return add(request, null); } - public BulkProcessor add(ActionRequest request, @Nullable Object payload) { + public BulkProcessor add(DocumentRequest request, @Nullable Object payload) { internalAdd(request, payload); return this; } @@ -282,7 +282,7 @@ public class BulkProcessor implements Closeable { } } - private synchronized void internalAdd(ActionRequest request, @Nullable Object payload) { + private synchronized void internalAdd(DocumentRequest request, @Nullable Object payload) { ensureOpen(); bulkRequest.add(request, payload); executeIfNeeded(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 7e7aa4ce603..538dfc4c3a5 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -46,6 +47,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -65,7 +67,7 @@ public class BulkRequest extends ActionRequest implements Composite * {@link WriteRequest}s to this but java doesn't support syntax to 
declare that everything in the array has both types so we declare * the one with the least casts. */ - final List> requests = new ArrayList<>(); + final List> requests = new ArrayList<>(); List payloads = null; protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; @@ -80,14 +82,14 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(ActionRequest... requests) { - for (ActionRequest request : requests) { + public BulkRequest add(DocumentRequest... requests) { + for (DocumentRequest request : requests) { add(request, null); } return this; } - public BulkRequest add(ActionRequest request) { + public BulkRequest add(DocumentRequest request) { return add(request, null); } @@ -97,7 +99,7 @@ public class BulkRequest extends ActionRequest implements Composite * @param payload Optional payload * @return the current bulk request */ - public BulkRequest add(ActionRequest request, @Nullable Object payload) { + public BulkRequest add(DocumentRequest request, @Nullable Object payload) { if (request instanceof IndexRequest) { add((IndexRequest) request, payload); } else if (request instanceof DeleteRequest) { @@ -113,8 +115,8 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(Iterable> requests) { - for (ActionRequest request : requests) { + public BulkRequest add(Iterable> requests) { + for (DocumentRequest request : requests) { add(request); } return this; @@ -200,18 +202,13 @@ public class BulkRequest extends ActionRequest implements Composite /** * The list of requests in this bulk request. 
*/ - public List> requests() { + public List> requests() { return this.requests; } @Override public List subRequests() { - List indicesRequests = new ArrayList<>(); - for (ActionRequest request : requests) { - assert request instanceof IndicesRequest; - indicesRequests.add((IndicesRequest) request); - } - return indicesRequests; + return requests.stream().collect(Collectors.toList()); } /** @@ -497,7 +494,7 @@ public class BulkRequest extends ActionRequest implements Composite * @return Whether this bulk request contains index request with an ingest pipeline enabled. */ public boolean hasIndexRequestsWithPipelines() { - for (ActionRequest actionRequest : requests) { + for (DocumentRequest actionRequest : requests) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { @@ -515,13 +512,13 @@ public class BulkRequest extends ActionRequest implements Composite if (requests.isEmpty()) { validationException = addValidationError("no requests added", validationException); } - for (ActionRequest request : requests) { + for (DocumentRequest request : requests) { // We first check if refresh has been set if (((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { validationException = addValidationError( "RefreshPolicy is not supported on an item request. 
Set it on the BulkRequest instead.", validationException); } - ActionRequestValidationException ex = request.validate(); + ActionRequestValidationException ex = ((WriteRequest) request).validate(); if (ex != null) { if (validationException == null) { validationException = new ActionRequestValidationException(); @@ -563,15 +560,17 @@ public class BulkRequest extends ActionRequest implements Composite super.writeTo(out); waitForActiveShards.writeTo(out); out.writeVInt(requests.size()); - for (ActionRequest request : requests) { + for (DocumentRequest request : requests) { if (request instanceof IndexRequest) { out.writeByte((byte) 0); + ((IndexRequest) request).writeTo(out); } else if (request instanceof DeleteRequest) { out.writeByte((byte) 1); + ((DeleteRequest) request).writeTo(out); } else if (request instanceof UpdateRequest) { out.writeByte((byte) 2); + ((UpdateRequest) request).writeTo(out); } - request.writeTo(out); } refreshPolicy.writeTo(out); timeout.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index da080b54b25..f7861d1e093 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -19,11 +19,9 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -58,18 +56,18 @@ import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import 
java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.LongSupplier; +import java.util.stream.Collectors; /** - * + * Groups bulk request items by shard, optionally creating non-existent indices and + * delegates to {@link TransportShardBulkAction} for shard-level bulk execution */ public class TransportBulkAction extends HandledTransportAction { @@ -119,15 +117,9 @@ public class TransportBulkAction extends HandledTransportAction autoCreateIndices = new HashSet<>(); - for (ActionRequest request : bulkRequest.requests) { - if (request instanceof DocumentRequest) { - DocumentRequest req = (DocumentRequest) request; - autoCreateIndices.add(req.index()); - } else { - throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName()); - } - } + final Set autoCreateIndices = bulkRequest.requests.stream() + .map(DocumentRequest::index) + .collect(Collectors.toSet()); final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size()); ClusterState state = clusterService.state(); for (String index : autoCreateIndices) { @@ -153,7 +145,7 @@ public class TransportBulkAction extends HandledTransportAction request = bulkRequest.requests.get(i); if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) { bulkRequest.requests.set(i, null); } @@ -188,27 +180,10 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, ActionRequest request, String index, Exception e) { - if (request instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) request; - if (index.equals(indexRequest.index())) { - responses.set(idx, new BulkItemResponse(idx, "index", new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e))); - return true; - } - } else if (request 
instanceof DeleteRequest) { - DeleteRequest deleteRequest = (DeleteRequest) request; - if (index.equals(deleteRequest.index())) { - responses.set(idx, new BulkItemResponse(idx, "delete", new BulkItemResponse.Failure(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), e))); - return true; - } - } else if (request instanceof UpdateRequest) { - UpdateRequest updateRequest = (UpdateRequest) request; - if (index.equals(updateRequest.index())) { - responses.set(idx, new BulkItemResponse(idx, "update", new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(), updateRequest.id(), e))); - return true; - } - } else { - throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName()); + private boolean setResponseFailureIfIndexMatches(AtomicArray responses, int idx, DocumentRequest request, String index, Exception e) { + if (index.equals(request.index())) { + responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e))); + return true; } return false; } @@ -236,95 +211,56 @@ public class TransportBulkAction extends HandledTransportAction documentRequest = bulkRequest.requests.get(i); //the request can only be null because we set it to null in the previous step, so it gets ignored - if (request == null) { + if (documentRequest == null) { continue; } - DocumentRequest documentRequest = (DocumentRequest) request; if (addFailureIfIndexIsUnavailable(documentRequest, bulkRequest, responses, i, concreteIndices, metaData)) { continue; } Index concreteIndex = concreteIndices.resolveIfAbsent(documentRequest); - if (request instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) request; - MappingMetaData mappingMd = null; - final IndexMetaData indexMetaData = metaData.index(concreteIndex); - if (indexMetaData != null) { - mappingMd = indexMetaData.mappingOrDefault(indexRequest.type()); + try { + switch 
(documentRequest.opType()) { + case CREATE: + case INDEX: + IndexRequest indexRequest = (IndexRequest) documentRequest; + MappingMetaData mappingMd = null; + final IndexMetaData indexMetaData = metaData.index(concreteIndex); + if (indexMetaData != null) { + mappingMd = indexMetaData.mappingOrDefault(indexRequest.type()); + } + indexRequest.resolveRouting(metaData); + indexRequest.process(mappingMd, allowIdGeneration, concreteIndex.getName()); + break; + case UPDATE: + TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest)documentRequest); + break; + case DELETE: + TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (DeleteRequest)documentRequest); + break; + default: throw new AssertionError("request type not supported: [" + documentRequest.opType() + "]"); } - try { - indexRequest.resolveRouting(metaData); - indexRequest.process(mappingMd, allowIdGeneration, concreteIndex.getName()); - } catch (ElasticsearchParseException | RoutingMissingException e) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), indexRequest.type(), indexRequest.id(), e); - BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure); - responses.set(i, bulkItemResponse); - // make sure the request gets never processed again - bulkRequest.requests.set(i, null); - } - } else if (request instanceof DeleteRequest) { - try { - TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (DeleteRequest)request); - } catch(RoutingMissingException e) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e); - BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "delete", failure); - responses.set(i, bulkItemResponse); - // make sure the request gets never processed again - bulkRequest.requests.set(i, null); - } - - } else if (request instanceof 
UpdateRequest) { - try { - TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest)request); - } catch(RoutingMissingException e) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e); - BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "update", failure); - responses.set(i, bulkItemResponse); - // make sure the request gets never processed again - bulkRequest.requests.set(i, null); - } - } else { - throw new AssertionError("request type not supported: [" + request.getClass().getName() + "]"); + } catch (ElasticsearchParseException | RoutingMissingException e) { + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e); + BulkItemResponse bulkItemResponse = new BulkItemResponse(i, documentRequest.opType(), failure); + responses.set(i, bulkItemResponse); + // make sure the request gets never processed again + bulkRequest.requests.set(i, null); } } // first, go over all the requests and create a ShardId -> Operations mapping Map> requestsByShard = new HashMap<>(); - for (int i = 0; i < bulkRequest.requests.size(); i++) { - ActionRequest request = bulkRequest.requests.get(i); - if (request instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) request; - String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, indexRequest.id(), indexRequest.routing()).shardId(); - List list = requestsByShard.get(shardId); - if (list == null) { - list = new ArrayList<>(); - requestsByShard.put(shardId, list); - } - list.add(new BulkItemRequest(i, request)); - } else if (request instanceof DeleteRequest) { - DeleteRequest deleteRequest = (DeleteRequest) request; - String concreteIndex = 
concreteIndices.getConcreteIndex(deleteRequest.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.id(), deleteRequest.routing()).shardId(); - List list = requestsByShard.get(shardId); - if (list == null) { - list = new ArrayList<>(); - requestsByShard.put(shardId, list); - } - list.add(new BulkItemRequest(i, request)); - } else if (request instanceof UpdateRequest) { - UpdateRequest updateRequest = (UpdateRequest) request; - String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.id(), updateRequest.routing()).shardId(); - List list = requestsByShard.get(shardId); - if (list == null) { - list = new ArrayList<>(); - requestsByShard.put(shardId, list); - } - list.add(new BulkItemRequest(i, request)); + DocumentRequest request = bulkRequest.requests.get(i); + if (request == null) { + continue; } + String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName(); + ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, request.id(), request.routing()).shardId(); + List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); + shardRequests.add(new BulkItemRequest(i, request)); } if (requestsByShard.isEmpty()) { @@ -364,19 +300,9 @@ public class TransportBulkAction extends HandledTransportAction documentRequest = request.request(); + responses.set(request.id(), new BulkItemResponse(request.id(), documentRequest.opType(), + new BulkItemResponse.Failure(indexName, documentRequest.type(), documentRequest.id(), e))); } if (counter.decrementAndGet() == 0) { finishHim(); @@ -413,15 +339,7 @@ public class TransportBulkAction extends HandledTransportAction { - private static final String OP_TYPE_UPDATE = "update"; - private static final String OP_TYPE_DELETE = "delete"; - 
public static final String ACTION_NAME = BulkAction.NAME + "[s]"; private final UpdateHelper updateHelper; @@ -157,7 +154,7 @@ public class TransportShardBulkAction extends TransportWriteAction writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); DeleteResponse deleteResponse = writeResult.getResponse(); location = locationToSync(location, writeResult.getLocation()); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse)); + setResponse(item, new BulkItemResponse(item.id(), deleteRequest.opType(), deleteResponse)); } catch (Exception e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it if (retryPrimaryException(e)) { @@ -216,7 +213,7 @@ public class TransportShardBulkAction extends TransportWriteAction= updateRequest.retryOnConflict()) { - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, + setResponse(item, new BulkItemResponse(item.id(), updateRequest.opType(), new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); } } else { @@ -299,20 +296,20 @@ public class TransportShardBulkAction extends TransportWriteAction impleme return this.routing; } - /** - * Sets the version, which will cause the delete operation to only be performed if a matching - * version exists and no changes happened on the doc since then. 
- */ + @Override public DeleteRequest version(long version) { this.version = version; return this; } + @Override public long version() { return this.version; } + @Override public DeleteRequest versionType(VersionType versionType) { this.versionType = versionType; return this; } + @Override public VersionType versionType() { return this.versionType; } + @Override + public OpType opType() { + return OpType.DELETE; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 63ede68b9fe..910abf8728b 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -69,67 +69,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; */ public class IndexRequest extends ReplicatedWriteRequest implements DocumentRequest { - /** - * Operation type controls if the type of the index operation. - */ - public static enum OpType { - /** - * Index the source. If there an existing document with the id, it will - * be replaced. - */ - INDEX((byte) 0), - /** - * Creates the resource. Simply adds it to the index, if there is an existing - * document with the id, then it won't be removed. - */ - CREATE((byte) 1); - - private final byte id; - private final String lowercase; - - OpType(byte id) { - this.id = id; - this.lowercase = this.toString().toLowerCase(Locale.ENGLISH); - } - - /** - * The internal representation of the operation type. - */ - public byte id() { - return id; - } - - public String lowercase() { - return this.lowercase; - } - - /** - * Constructs the operation type from its internal representation. 
- */ - public static OpType fromId(byte id) { - if (id == 0) { - return INDEX; - } else if (id == 1) { - return CREATE; - } else { - throw new IllegalArgumentException("No type match for [" + id + "]"); - } - } - - public static OpType fromString(String sOpType) { - String lowersOpType = sOpType.toLowerCase(Locale.ROOT); - switch (lowersOpType) { - case "create": - return OpType.CREATE; - case "index": - return OpType.INDEX; - default: - throw new IllegalArgumentException("opType [" + sOpType + "] not allowed, either [index] or [create] are allowed"); - } - } - - } - private String type; private String id; @Nullable @@ -506,6 +445,9 @@ public class IndexRequest extends ReplicatedWriteRequest implement * Sets the type of operation to perform. */ public IndexRequest opType(OpType opType) { + if (opType != OpType.CREATE && opType != OpType.INDEX) { + throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]"); + } this.opType = opType; if (opType == OpType.CREATE) { version(Versions.MATCH_DELETED); @@ -515,11 +457,19 @@ public class IndexRequest extends ReplicatedWriteRequest implement } /** - * Sets a string representation of the {@link #opType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can + * Sets a string representation of the {@link #opType(OpType)}. Can * be either "index" or "create". */ public IndexRequest opType(String opType) { - return opType(OpType.fromString(opType)); + String op = opType.toLowerCase(Locale.ROOT); + if (op.equals("create")) { + opType(OpType.CREATE); + } else if (op.equals("index")) { + opType(OpType.INDEX); + } else { + throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]"); + } + return this; } @@ -534,34 +484,29 @@ public class IndexRequest extends ReplicatedWriteRequest implement } } - /** - * The type of operation to perform. 
- */ + @Override public OpType opType() { return this.opType; } - /** - * Sets the version, which will cause the index operation to only be performed if a matching - * version exists and no changes happened on the doc since then. - */ + @Override public IndexRequest version(long version) { this.version = version; return this; } + @Override public long version() { return this.version; } - /** - * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}. - */ + @Override public IndexRequest versionType(VersionType versionType) { this.versionType = versionType; return this; } + @Override public VersionType versionType() { return this.versionType; } @@ -651,7 +596,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement out.writeOptionalString(timestamp); out.writeOptionalWriteable(ttl); out.writeBytesReference(source); - out.writeByte(opType.id()); + out.writeByte(opType.getId()); out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 20587bf0ea9..7d567b4bdba 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.index; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -200,17 +201,17 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder> { + static final class BulkRequestModifier implements Iterator> { final BulkRequest bulkRequest; final Set failedSlots; @@ -148,7 +149,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio } 
@Override - public ActionRequest next() { + public DocumentRequest next() { return bulkRequest.requests().get(++currentSlot); } @@ -169,7 +170,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio int slot = 0; originalSlots = new int[bulkRequest.requests().size() - failedSlots.size()]; for (int i = 0; i < bulkRequest.requests().size(); i++) { - ActionRequest request = bulkRequest.requests().get(i); + DocumentRequest request = bulkRequest.requests().get(i); if (failedSlots.contains(i) == false) { modifiedBulkRequest.add(request); originalSlots[slot++] = i; @@ -205,7 +206,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio // 3) Continue with the next request in the bulk. failedSlots.add(currentSlot); BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e); - itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType().lowercase(), failure)); + itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType(), failure)); } } diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index 3f33b2e3901..1dd9b0c7d79 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RealtimeRequest; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.get.MultiGetRequest; @@ -56,7 +55,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; * Note, the {@link #index()}, {@link 
#type(String)} and {@link #id(String)} are * required. */ -public class TermVectorsRequest extends SingleShardRequest implements DocumentRequest, RealtimeRequest { +public class TermVectorsRequest extends SingleShardRequest implements RealtimeRequest { private String type; @@ -200,7 +199,6 @@ public class TermVectorsRequest extends SingleShardRequest i /** * Returns the type of document to get the term vector for. */ - @Override public String type() { return type; } @@ -208,7 +206,6 @@ public class TermVectorsRequest extends SingleShardRequest i /** * Returns the id of document the term vector is requested for. */ - @Override public String id() { return id; } @@ -250,18 +247,15 @@ public class TermVectorsRequest extends SingleShardRequest i /** * @return The routing for this request. */ - @Override public String routing() { return routing; } - @Override public TermVectorsRequest routing(String routing) { this.routing = routing; return this; } - @Override public String parent() { return parent; } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 0d919ff0892..662d26117b1 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -398,31 +398,33 @@ public class UpdateRequest extends InstanceShardOperationRequest return this.retryOnConflict; } - /** - * Sets the version, which will cause the index operation to only be performed if a matching - * version exists and no changes happened on the doc since then. - */ + @Override public UpdateRequest version(long version) { this.version = version; return this; } + @Override public long version() { return this.version; } - /** - * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}. 
- */ + @Override public UpdateRequest versionType(VersionType versionType) { this.versionType = versionType; return this; } + @Override public VersionType versionType() { return this.versionType; } + @Override + public OpType opType() { + return OpType.UPDATE; + } + @Override public UpdateRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { this.refreshPolicy = refreshPolicy; diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index e7146636534..57eb7afcb5a 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -19,7 +19,7 @@ package org.elasticsearch.ingest; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; @@ -68,7 +68,7 @@ public class PipelineExecutionService implements ClusterStateListener { }); } - public void executeBulkRequest(Iterable> actionRequests, + public void executeBulkRequest(Iterable> actionRequests, BiConsumer itemFailureHandler, Consumer completionHandler) { threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() { @@ -80,7 +80,7 @@ public class PipelineExecutionService implements ClusterStateListener { @Override protected void doRun() throws Exception { - for (ActionRequest actionRequest : actionRequests) { + for (DocumentRequest actionRequest : actionRequests) { if ((actionRequest instanceof IndexRequest)) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java index 
6c9723b5b93..f28a98f4888 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java @@ -86,7 +86,7 @@ public class RestIndexAction extends BaseRestHandler { String sOpType = request.param("op_type"); if (sOpType != null) { try { - indexRequest.opType(IndexRequest.OpType.fromString(sOpType)); + indexRequest.opType(sOpType); } catch (IllegalArgumentException eia){ try { XContentBuilder builder = channel.newErrorBuilder(); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index 142fb282c20..c88055f8dd3 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -20,8 +20,8 @@ package org.elasticsearch.action.bulk; import org.apache.lucene.util.Constants; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -111,7 +111,7 @@ public class BulkRequestTests extends ESTestCase { public void testBulkAddIterable() { BulkRequest bulkRequest = Requests.bulkRequest(); - List> requests = new ArrayList<>(); + List> requests = new ArrayList<>(); requests.add(new IndexRequest("test", "test", "id").source("field", "value")); requests.add(new UpdateRequest("test", "test", "id").doc("field", "value")); requests.add(new DeleteRequest("test", "test", "id")); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index 16502ff92b1..4c24e76c13f 100644 --- 
a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -47,6 +47,7 @@ import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.function.Function; +import static org.elasticsearch.action.DocumentRequest.OpType; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -319,7 +320,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getVersion(), equalTo(1L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(1L)); assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(1)); @@ -357,7 +358,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getVersion(), equalTo(2L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(2L)); assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(2)); @@ -381,7 +382,7 @@ public class 
BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getVersion(), equalTo(3L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); } } @@ -398,7 +399,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); } builder = client().prepareBulk(); @@ -414,7 +415,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); for (int j = 0; j < 5; j++) { GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute() .actionGet(); @@ -755,12 +756,12 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertNoFailures(indexBulkItemResponse); assertThat(bulkItemResponse.getItems().length, is(6)); - assertThat(bulkItemResponse.getItems()[0].getOpType(), is("index")); - assertThat(bulkItemResponse.getItems()[1].getOpType(), is("index")); - assertThat(bulkItemResponse.getItems()[2].getOpType(), is("update")); - assertThat(bulkItemResponse.getItems()[3].getOpType(), is("update")); - assertThat(bulkItemResponse.getItems()[4].getOpType(), is("delete")); - 
assertThat(bulkItemResponse.getItems()[5].getOpType(), is("delete")); + assertThat(bulkItemResponse.getItems()[0].getOpType(), is(OpType.INDEX)); + assertThat(bulkItemResponse.getItems()[1].getOpType(), is(OpType.INDEX)); + assertThat(bulkItemResponse.getItems()[2].getOpType(), is(OpType.UPDATE)); + assertThat(bulkItemResponse.getItems()[3].getOpType(), is(OpType.UPDATE)); + assertThat(bulkItemResponse.getItems()[4].getOpType(), is(OpType.DELETE)); + assertThat(bulkItemResponse.getItems()[5].getOpType(), is(OpType.DELETE)); } private static String indexOrAlias() { @@ -805,9 +806,9 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(bulkResponse.hasFailures(), is(true)); BulkItemResponse[] responseItems = bulkResponse.getItems(); assertThat(responseItems.length, is(3)); - assertThat(responseItems[0].getOpType(), is("index")); - assertThat(responseItems[1].getOpType(), is("update")); - assertThat(responseItems[2].getOpType(), is("delete")); + assertThat(responseItems[0].getOpType(), is(OpType.INDEX)); + assertThat(responseItems[1].getOpType(), is(OpType.UPDATE)); + assertThat(responseItems[2].getOpType(), is(OpType.DELETE)); } // issue 9821 @@ -817,9 +818,9 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { .add(client().prepareUpdate().setIndex("INVALID.NAME").setType("type1").setId("1").setDoc("field", randomInt())) .add(client().prepareDelete().setIndex("INVALID.NAME").setType("type1").setId("1")).get(); assertThat(bulkResponse.getItems().length, is(3)); - assertThat(bulkResponse.getItems()[0].getOpType(), is("index")); - assertThat(bulkResponse.getItems()[1].getOpType(), is("update")); - assertThat(bulkResponse.getItems()[2].getOpType(), is("delete")); + assertThat(bulkResponse.getItems()[0].getOpType(), is(OpType.INDEX)); + assertThat(bulkResponse.getItems()[1].getOpType(), is(OpType.UPDATE)); + assertThat(bulkResponse.getItems()[2].getOpType(), is(OpType.DELETE)); } } diff --git 
a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 4fa640b3adc..72bdc8a58f9 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -20,7 +20,12 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentRequest.OpType; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.unit.TimeValue; @@ -212,11 +217,11 @@ public class RetryTests extends ESTestCase { } private BulkItemResponse successfulResponse() { - return new BulkItemResponse(1, "update", new DeleteResponse()); + return new BulkItemResponse(1, OpType.DELETE, new DeleteResponse()); } private BulkItemResponse failedResponse() { - return new BulkItemResponse(1, "update", new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full"))); + return new BulkItemResponse(1, OpType.INDEX, new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full"))); } } } diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index a8699dd3ea7..e6fcad5443c 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.index; import 
org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.VersionType; @@ -43,18 +44,24 @@ public class IndexRequestTests extends ESTestCase { String createUpper = "CREATE"; String indexUpper = "INDEX"; - assertThat(IndexRequest.OpType.fromString(create), equalTo(IndexRequest.OpType.CREATE)); - assertThat(IndexRequest.OpType.fromString(index), equalTo(IndexRequest.OpType.INDEX)); - assertThat(IndexRequest.OpType.fromString(createUpper), equalTo(IndexRequest.OpType.CREATE)); - assertThat(IndexRequest.OpType.fromString(indexUpper), equalTo(IndexRequest.OpType.INDEX)); + IndexRequest indexRequest = new IndexRequest(""); + indexRequest.opType(create); + assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.CREATE)); + indexRequest.opType(createUpper); + assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.CREATE)); + indexRequest.opType(index); + assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.INDEX)); + indexRequest.opType(indexUpper); + assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.INDEX)); } public void testReadBogusString() { try { - IndexRequest.OpType.fromString("foobar"); + IndexRequest indexRequest = new IndexRequest(""); + indexRequest.opType("foobar"); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("opType [foobar] not allowed")); + assertThat(e.getMessage(), equalTo("opType must be 'create' or 'index', found: [foobar]")); } } diff --git a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java index 9ee5036131d..8dac5853cac 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java +++ 
b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; */ import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -116,10 +116,10 @@ public class BulkRequestModifierTests extends ESTestCase { }); List originalResponses = new ArrayList<>(); - for (ActionRequest actionRequest : bulkRequest.requests()) { + for (DocumentRequest actionRequest : bulkRequest.requests()) { IndexRequest indexRequest = (IndexRequest) actionRequest; IndexResponse indexResponse = new IndexResponse(new ShardId("index", "_na_", 0), indexRequest.type(), indexRequest.id(), 1, true); - originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType().lowercase(), indexResponse)); + originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType(), indexResponse)); } bulkResponseListener.onResponse(new BulkResponse(originalResponses.toArray(new BulkItemResponse[originalResponses.size()]), 0)); diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java index b04533fafc4..331a956e8ac 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import 
org.elasticsearch.action.delete.DeleteRequest; @@ -174,7 +174,7 @@ public class IngestActionFilterTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); for (int i = 0; i < numRequest; i++) { if (rarely()) { - ActionRequest request; + DocumentRequest request; if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); } else { @@ -196,7 +196,7 @@ public class IngestActionFilterTests extends ESTestCase { verifyZeroInteractions(actionListener); int assertedRequests = 0; - for (ActionRequest actionRequest : bulkRequest.requests()) { + for (DocumentRequest actionRequest : bulkRequest.requests()) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; assertThat(indexRequest.sourceAsMap().size(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java index 065128af918..abc07da0b39 100644 --- a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java @@ -36,6 +36,7 @@ import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; +import static org.elasticsearch.action.DocumentRequest.OpType; import static org.elasticsearch.client.Requests.clearIndicesCacheRequest; import static org.elasticsearch.client.Requests.getRequest; import static org.elasticsearch.client.Requests.indexRequest; @@ -190,31 +191,31 @@ public class DocumentActionsIT extends ESIntegTestCase { assertThat(bulkResponse.getItems().length, equalTo(5)); assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[0].getOpType(), equalTo("index")); + assertThat(bulkResponse.getItems()[0].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[0].getIndex(), equalTo(getConcreteIndexName())); assertThat(bulkResponse.getItems()[0].getType(), equalTo("type1")); 
assertThat(bulkResponse.getItems()[0].getId(), equalTo("1")); assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[1].getOpType(), equalTo("create")); + assertThat(bulkResponse.getItems()[1].getOpType(), equalTo(OpType.CREATE)); assertThat(bulkResponse.getItems()[1].getIndex(), equalTo(getConcreteIndexName())); assertThat(bulkResponse.getItems()[1].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[1].getId(), equalTo("2")); assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[2].getOpType(), equalTo("index")); + assertThat(bulkResponse.getItems()[2].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[2].getIndex(), equalTo(getConcreteIndexName())); assertThat(bulkResponse.getItems()[2].getType(), equalTo("type1")); String generatedId3 = bulkResponse.getItems()[2].getId(); assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[3].getOpType(), equalTo("delete")); + assertThat(bulkResponse.getItems()[3].getOpType(), equalTo(OpType.DELETE)); assertThat(bulkResponse.getItems()[3].getIndex(), equalTo(getConcreteIndexName())); assertThat(bulkResponse.getItems()[3].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[3].getId(), equalTo("1")); assertThat(bulkResponse.getItems()[4].isFailed(), equalTo(true)); - assertThat(bulkResponse.getItems()[4].getOpType(), equalTo("index")); + assertThat(bulkResponse.getItems()[4].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[4].getIndex(), equalTo(getConcreteIndexName())); assertThat(bulkResponse.getItems()[4].getType(), equalTo("type1")); diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index 53964132abe..4cad7e5ab63 100644 --- 
a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -314,7 +314,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); int numIndexRequests = 0; for (int i = 0; i < numRequest; i++) { - ActionRequest request; + DocumentRequest request; if (randomBoolean()) { if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); diff --git a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java index d8cf1e7b5ec..5980f781e2e 100644 --- a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -20,6 +20,8 @@ package org.elasticsearch.routing; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -259,7 +261,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo("index")); + assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.INDEX)); 
assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -280,7 +282,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo("update")); + assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.UPDATE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -301,7 +303,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo("delete")); + assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.DELETE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 67e7d528e59..e43991efccc 100644 --- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.versioning; import 
org.apache.lucene.util.TestUtil; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -724,7 +725,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { client() .prepareIndex("test", "type", "id") .setSource("foo", "bar") - .setOpType(IndexRequest.OpType.INDEX) + .setOpType(DocumentRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) .execute() @@ -793,7 +794,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { client() .prepareIndex("test", "type", "id") .setSource("foo", "bar") - .setOpType(IndexRequest.OpType.INDEX) + .setOpType(DocumentRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) .execute() diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 0178d2e1fb6..678ecad149d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -256,22 +256,21 @@ public abstract class AbstractAsyncBulkByScrollAction> { + interface RequestWrapper> { void setIndex(String index); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index f5fb927cda9..4ea7f039970 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -28,6 +28,8 @@ 
import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocWriteResponse.Result; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -257,35 +259,36 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { BulkItemResponse[] responses = new BulkItemResponse[randomIntBetween(0, 100)]; for (int i = 0; i < responses.length; i++) { ShardId shardId = new ShardId(new Index("name", "uid"), 0); - String opType; if (rarely()) { - opType = randomSimpleString(random()); versionConflicts++; - responses[i] = new BulkItemResponse(i, opType, new Failure(shardId.getIndexName(), "type", "id" + i, + responses[i] = new BulkItemResponse(i, randomFrom(DocumentRequest.OpType.values()), + new Failure(shardId.getIndexName(), "type", "id" + i, new VersionConflictEngineException(shardId, "type", "id", "test"))); continue; } boolean createdResponse; + DocumentRequest.OpType opType; switch (randomIntBetween(0, 2)) { case 0: - opType = randomFrom("index", "create"); createdResponse = true; + opType = DocumentRequest.OpType.CREATE; created++; break; case 1: - opType = randomFrom("index", "create"); createdResponse = false; + opType = randomFrom(DocumentRequest.OpType.INDEX, DocumentRequest.OpType.UPDATE); updated++; break; case 2: - opType = "delete"; createdResponse = false; + opType = DocumentRequest.OpType.DELETE; deleted++; break; default: throw new RuntimeException("Bad scenario"); } - responses[i] = new BulkItemResponse(i, opType, new IndexResponse(shardId, "type", "id" + i, randomInt(), createdResponse)); + responses[i] = new BulkItemResponse(i, opType, + new IndexResponse(shardId, "type", "id" + i, randomInt(), createdResponse)); 
} new DummyAbstractAsyncBulkByScrollAction().onBulkResponse(timeValueNanos(System.nanoTime()), new BulkResponse(responses, 0)); assertEquals(versionConflicts, testTask.getStatus().getVersionConflicts()); @@ -359,7 +362,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { public void testBulkFailuresAbortRequest() throws Exception { Failure failure = new Failure("index", "type", "id", new RuntimeException("test")); DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction(); - BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] {new BulkItemResponse(0, "index", failure)}, randomLong()); + BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] + {new BulkItemResponse(0, DocumentRequest.OpType.CREATE, failure)}, randomLong()); action.onBulkResponse(timeValueNanos(System.nanoTime()), bulkResponse); BulkIndexByScrollResponse response = listener.get(); assertThat(response.getBulkFailures(), contains(failure)); @@ -765,33 +769,29 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } BulkItemResponse[] responses = new BulkItemResponse[bulk.requests().size()]; for (int i = 0; i < bulk.requests().size(); i++) { - ActionRequest item = bulk.requests().get(i); - String opType; + DocumentRequest item = bulk.requests().get(i); DocWriteResponse response; - ShardId shardId = new ShardId(new Index(((ReplicationRequest) item).index(), "uuid"), 0); + ShardId shardId = new ShardId(new Index(item.index(), "uuid"), 0); if (item instanceof IndexRequest) { IndexRequest index = (IndexRequest) item; - opType = index.opType().lowercase(); response = new IndexResponse(shardId, index.type(), index.id(), randomIntBetween(0, Integer.MAX_VALUE), true); } else if (item instanceof UpdateRequest) { UpdateRequest update = (UpdateRequest) item; - opType = "update"; response = new UpdateResponse(shardId, update.type(), update.id(), - randomIntBetween(0, Integer.MAX_VALUE), DocWriteResponse.Result.CREATED); + 
randomIntBetween(0, Integer.MAX_VALUE), Result.CREATED); } else if (item instanceof DeleteRequest) { DeleteRequest delete = (DeleteRequest) item; - opType = "delete"; response = new DeleteResponse(shardId, delete.type(), delete.id(), randomIntBetween(0, Integer.MAX_VALUE), true); } else { throw new RuntimeException("Unknown request: " + item); } if (i == toReject) { - responses[i] = new BulkItemResponse(i, opType, + responses[i] = new BulkItemResponse(i, item.opType(), new Failure(response.getIndex(), response.getType(), response.getId(), new EsRejectedExecutionException())); } else { - responses[i] = new BulkItemResponse(i, opType, response); + responses[i] = new BulkItemResponse(i, item.opType(), response); } } listener.onResponse((Response) new BulkResponse(responses, 1)); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index b81be4a1bb2..c909ea42ecb 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -28,7 +28,7 @@ import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; -import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE; +import static org.elasticsearch.action.DocumentRequest.OpType.CREATE; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java index 2988fcb5ca6..041c796b173 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java +++ 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.get.GetResponse; -import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE; +import static org.elasticsearch.action.DocumentRequest.OpType.CREATE; import static org.elasticsearch.index.VersionType.EXTERNAL; import static org.elasticsearch.index.VersionType.INTERNAL; From cc993de9968bc77fa7674c797fd30a554b2c856f Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Mon, 22 Aug 2016 13:13:29 -0400 Subject: [PATCH 02/53] Simplify shard-level bulk operation execution This commit refactors execution of shard-level bulk operations to use the same failure handling for index, delete and update operations. --- .../action/bulk/TransportShardBulkAction.java | 400 +++++------------- 1 file changed, 105 insertions(+), 295 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 84a5197ea9f..7e73eacded1 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -21,7 +21,8 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.delete.TransportDeleteAction; @@ -29,7 +30,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.index.TransportIndexAction; import 
org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.action.update.UpdateHelper; @@ -66,9 +66,7 @@ import java.util.Map; import static org.elasticsearch.action.support.replication.ReplicationOperation.ignoreReplicaException; import static org.elasticsearch.action.support.replication.ReplicationOperation.isConflictException; -/** - * Performs the index operation. - */ +/** Performs shard-level bulk (index, delete or update) operations */ public class TransportShardBulkAction extends TransportWriteAction { public static final String ACTION_NAME = BulkAction.NAME + "[s]"; @@ -114,8 +112,7 @@ public class TransportShardBulkAction extends TransportWriteAction(response, location); } - private Translog.Location handleItem(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - if (item.request() instanceof IndexRequest) { - location = index(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); - } else if (item.request() instanceof DeleteRequest) { - location = delete(request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); - } else if (item.request() instanceof UpdateRequest) { - Tuple tuple = update(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); - location = tuple.v1(); - item = tuple.v2(); - } else { - throw new IllegalStateException("Unexpected index operation: " + item.request()); + /** Executes bulk item requests and handles request execution exceptions */ + private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard indexShard, + BulkShardRequest 
request, + long[] preVersions, VersionType[] preVersionTypes, + Translog.Location location, int requestIndex) { + preVersions[requestIndex] = request.items()[requestIndex].request().version(); + preVersionTypes[requestIndex] = request.items()[requestIndex].request().versionType(); + DocumentRequest.OpType opType = request.items()[requestIndex].request().opType(); + try { + WriteResult writeResult = innerExecuteBulkItemRequest(metaData, indexShard, + request, requestIndex); + if (writeResult.getResponse().getResult() != DocWriteResponse.Result.NOOP) { + location = locationToSync(location, writeResult.getLocation()); + } + // update the bulk item request because update request execution can mutate the bulk item request + BulkItemRequest item = request.items()[requestIndex]; + // add the response + setResponse(item, new BulkItemResponse(item.id(), opType, writeResult.getResponse())); + } catch (Exception e) { + // rethrow the failure if we are going to retry on primary and let parent failure to handle it + if (retryPrimaryException(e)) { + // restore updated versions... 
+ for (int j = 0; j < requestIndex; j++) { + DocumentRequest documentRequest = request.items()[j].request(); + documentRequest.version(preVersions[j]); + documentRequest.versionType(preVersionTypes[j]); + } + throw (ElasticsearchException) e; + } + BulkItemRequest item = request.items()[requestIndex]; + DocumentRequest documentRequest = item.request(); + if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) { + logger.trace("{} failed to execute bulk item ({}) {}", e, request.shardId(), + documentRequest.opType().getLowercase(), request); + } else { + logger.debug("{} failed to execute bulk item ({}) {}", e, request.shardId(), + documentRequest.opType().getLowercase(), request); + } + // if its a conflict failure, and we already executed the request on a primary (and we execute it + // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) + // then just use the response we got from the successful execution + if (item.getPrimaryResponse() != null && isConflictException(e)) { + setResponse(item, item.getPrimaryResponse()); + } else { + setResponse(item, new BulkItemResponse(item.id(), documentRequest.opType(), + new BulkItemResponse.Failure(request.index(), documentRequest.type(), documentRequest.id(), e))); + } } - - assert item.getPrimaryResponse() != null; + assert request.items()[requestIndex].getPrimaryResponse() != null; assert preVersionTypes[requestIndex] != null; return location; } - private Translog.Location index(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - IndexRequest indexRequest = (IndexRequest) item.request(); - preVersions[requestIndex] = indexRequest.version(); - preVersionTypes[requestIndex] = indexRequest.versionType(); - try { - WriteResult result = shardIndexOperation(request, indexRequest, metaData, indexShard, true); - location = 
locationToSync(location, result.getLocation()); - // add the response - IndexResponse indexResponse = result.getResponse(); - setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType(), indexResponse)); - } catch (Exception e) { - // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(e)) { - // restore updated versions... - for (int j = 0; j < requestIndex; j++) { - applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]); - } - throw (ElasticsearchException) e; - } - logFailure(e, "index", request.shardId(), indexRequest); - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (item.getPrimaryResponse() != null && isConflictException(e)) { - setResponse(item, item.getPrimaryResponse()); - } else { - setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType(), - new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e))); - } - } - return location; - } - - private > void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest request) { - if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) { - logger.trace("{} failed to execute bulk item ({}) {}", t, shardId, operation, request); - } else { - logger.debug("{} failed to execute bulk item ({}) {}", t, shardId, operation, request); - } - } - - private Translog.Location delete(BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - DeleteRequest deleteRequest = (DeleteRequest) item.request(); - preVersions[requestIndex] = deleteRequest.version(); - preVersionTypes[requestIndex] = deleteRequest.versionType(); - - try { - // add 
the response - final WriteResult writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - DeleteResponse deleteResponse = writeResult.getResponse(); - location = locationToSync(location, writeResult.getLocation()); - setResponse(item, new BulkItemResponse(item.id(), deleteRequest.opType(), deleteResponse)); - } catch (Exception e) { - // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(e)) { - // restore updated versions... - for (int j = 0; j < requestIndex; j++) { - applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]); - } - throw (ElasticsearchException) e; - } - logFailure(e, "delete", request.shardId(), deleteRequest); - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (item.getPrimaryResponse() != null && isConflictException(e)) { - setResponse(item, item.getPrimaryResponse()); - } else { - setResponse(item, new BulkItemResponse(item.id(), deleteRequest.opType(), - new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e))); - } - } - return location; - } - - private Tuple update(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - UpdateRequest updateRequest = (UpdateRequest) item.request(); - preVersions[requestIndex] = updateRequest.version(); - preVersionTypes[requestIndex] = updateRequest.versionType(); - // We need to do the requested retries plus the initial attempt. 
We don't do < 1+retry_on_conflict because retry_on_conflict may be Integer.MAX_VALUE - for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) { - UpdateResult updateResult; - try { - updateResult = shardUpdateOperation(metaData, request, updateRequest, indexShard); - } catch (Exception t) { - updateResult = new UpdateResult(null, null, false, t, null); - } - if (updateResult.success()) { - if (updateResult.writeResult != null) { - location = locationToSync(location, updateResult.writeResult.getLocation()); - } - switch (updateResult.result.getResponseResult()) { - case CREATED: - case UPDATED: - @SuppressWarnings("unchecked") - WriteResult result = updateResult.writeResult; - IndexRequest indexRequest = updateResult.request(); - BytesReference indexSourceAsBytes = indexRequest.source(); - // add the response - IndexResponse indexResponse = result.getResponse(); - UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult()); - if (updateRequest.fields() != null && updateRequest.fields().length > 0) { - Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); - updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); - } - item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest); - setResponse(item, new BulkItemResponse(item.id(), updateRequest.opType(), updateResponse)); - break; - case DELETED: - @SuppressWarnings("unchecked") - WriteResult writeResult = updateResult.writeResult; - DeleteResponse response = writeResult.getResponse(); - DeleteRequest deleteRequest = updateResult.request(); - updateResponse = new UpdateResponse(response.getShardInfo(), 
response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); - updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null)); - // Replace the update request to the translated delete request to execute on the replica. - item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest); - setResponse(item, new BulkItemResponse(item.id(), updateRequest.opType(), updateResponse)); - break; - case NOOP: - setResponse(item, new BulkItemResponse(item.id(), updateRequest.opType(), updateResult.noopResult)); - item.setIgnoreOnReplica(); // no need to go to the replica - break; - default: - throw new IllegalStateException("Illegal operation " + updateResult.result.getResponseResult()); - } - // NOTE: Breaking out of the retry_on_conflict loop! - break; - } else if (updateResult.failure()) { - Throwable e = updateResult.error; - if (updateResult.retry) { - // updateAttemptCount is 0 based and marks current attempt, if it's equal to retryOnConflict we are going out of the iteration - if (updateAttemptsCount >= updateRequest.retryOnConflict()) { - setResponse(item, new BulkItemResponse(item.id(), updateRequest.opType(), - new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); - } - } else { - // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(e)) { - // restore updated versions... 
- for (int j = 0; j < requestIndex; j++) { - applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]); - } - throw (ElasticsearchException) e; - } - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (item.getPrimaryResponse() != null && isConflictException(e)) { - setResponse(item, item.getPrimaryResponse()); - } else if (updateResult.result == null) { - setResponse(item, new BulkItemResponse(item.id(), updateRequest.opType(), new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); - } else { - switch (updateResult.result.getResponseResult()) { - case CREATED: - case UPDATED: - IndexRequest indexRequest = updateResult.request(); - logFailure(e, "index", request.shardId(), indexRequest); - setResponse(item, new BulkItemResponse(item.id(), updateRequest.opType(), - new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e))); - break; - case DELETED: - DeleteRequest deleteRequest = updateResult.request(); - logFailure(e, "delete", request.shardId(), deleteRequest); - setResponse(item, new BulkItemResponse(item.id(), deleteRequest.opType(), - new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e))); - break; - default: - throw new IllegalStateException("Illegal operation " + updateResult.result.getResponseResult()); + private WriteResult innerExecuteBulkItemRequest(IndexMetaData metaData, IndexShard indexShard, + BulkShardRequest request, int requestIndex) throws Exception { + DocumentRequest itemRequest = request.items()[requestIndex].request(); + switch (itemRequest.opType()) { + case CREATE: + case INDEX: + return TransportIndexAction.executeIndexRequestOnPrimary(((IndexRequest) itemRequest), indexShard, mappingUpdatedAction); + 
case UPDATE: + int maxAttempts = ((UpdateRequest) itemRequest).retryOnConflict(); + for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) { + try { + return shardUpdateOperation(metaData, indexShard, request, requestIndex, ((UpdateRequest) itemRequest)); + } catch (Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + if (attemptCount == maxAttempts // bubble up exception when we run out of attempts + || (cause instanceof VersionConflictEngineException) == false) { // or when exception is not a version conflict + throw e; } } - // NOTE: Breaking out of the retry_on_conflict loop! - break; } - - } + throw new IllegalStateException("version conflict exception should bubble up on last attempt"); + case DELETE: + return TransportDeleteAction.executeDeleteRequestOnPrimary(((DeleteRequest) itemRequest), indexShard); + default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); } - return Tuple.tuple(location, item); } private void setResponse(BulkItemRequest request, BulkItemResponse response) { @@ -335,105 +214,48 @@ public class TransportShardBulkAction extends TransportWriteAction shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, IndexMetaData metaData, - IndexShard indexShard, boolean processed) throws Exception { - - MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); - if (!processed) { - indexRequest.process(mappingMd, allowIdGeneration, request.index()); - } - return TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); - } - - static class UpdateResult { - - final UpdateHelper.Result result; - final ActionRequest actionRequest; - final boolean retry; - final Throwable error; - final WriteResult writeResult; - final UpdateResponse noopResult; - - UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, boolean retry, Throwable error, WriteResult writeResult) { - this.result = 
result; - this.actionRequest = actionRequest; - this.retry = retry; - this.error = error; - this.writeResult = writeResult; - this.noopResult = null; - } - - UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, WriteResult writeResult) { - this.result = result; - this.actionRequest = actionRequest; - this.writeResult = writeResult; - this.retry = false; - this.error = null; - this.noopResult = null; - } - - public UpdateResult(UpdateHelper.Result result, UpdateResponse updateResponse) { - this.result = result; - this.noopResult = updateResponse; - this.actionRequest = null; - this.writeResult = null; - this.retry = false; - this.error = null; - } - - - boolean failure() { - return error != null; - } - - boolean success() { - return noopResult != null || writeResult != null; - } - - @SuppressWarnings("unchecked") - T request() { - return (T) actionRequest; - } - - - } - - private UpdateResult shardUpdateOperation(IndexMetaData metaData, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard) { + /** + * Executes update request, doing a get and translating update to a index or delete operation + * NOTE: all operations except NOOP, reassigns the bulk item request + */ + private WriteResult shardUpdateOperation(IndexMetaData metaData, IndexShard indexShard, + BulkShardRequest request, + int requestIndex, UpdateRequest updateRequest) + throws Exception { + // Todo: capture read version conflicts, missing documents and malformed script errors in the write result due to get request UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard); switch (translate.getResponseResult()) { case CREATED: case UPDATED: IndexRequest indexRequest = translate.action(); - try { - WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, metaData, indexShard, false); - return new UpdateResult(translate, indexRequest, result); - } catch (Exception e) { - final Throwable cause = 
ExceptionsHelper.unwrapCause(e); - boolean retry = false; - if (cause instanceof VersionConflictEngineException) { - retry = true; - } - return new UpdateResult(translate, indexRequest, retry, cause, null); + MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); + indexRequest.process(mappingMd, allowIdGeneration, request.index()); + WriteResult writeResult = TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); + BytesReference indexSourceAsBytes = indexRequest.source(); + IndexResponse indexResponse = writeResult.getResponse(); + UpdateResponse writeUpdateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult()); + if (updateRequest.fields() != null && updateRequest.fields().length > 0) { + Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); + writeUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); } + // Replace the update request to the translated index request to execute on the replica. 
+ request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest); + return new WriteResult<>(writeUpdateResponse, writeResult.getLocation()); case DELETED: DeleteRequest deleteRequest = translate.action(); - try { - WriteResult result = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - return new UpdateResult(translate, deleteRequest, result); - } catch (Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - boolean retry = false; - if (cause instanceof VersionConflictEngineException) { - retry = true; - } - return new UpdateResult(translate, deleteRequest, retry, cause, null); - } + WriteResult deleteResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); + DeleteResponse response = deleteResult.getResponse(); + UpdateResponse deleteUpdateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); + deleteUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), translate.updatedSourceAsMap(), translate.updateSourceContentType(), null)); + // Replace the update request to the translated delete request to execute on the replica. 
+ request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest); + return new WriteResult<>(deleteUpdateResponse, deleteResult.getLocation()); case NOOP: - UpdateResponse updateResponse = translate.action(); + BulkItemRequest item = request.items()[requestIndex]; indexShard.noopUpdate(updateRequest.type()); - return new UpdateResult(translate, updateResponse); - default: - throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); + item.setIgnoreOnReplica(); // no need to go to the replica + return new WriteResult<>(translate.action(), null); + default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); } } @@ -477,18 +299,6 @@ public class TransportShardBulkAction extends TransportWriteAction Date: Mon, 22 Aug 2016 13:15:18 -0400 Subject: [PATCH 03/53] Fix double delete on replica copy when executing bulk request --- .../action/bulk/TransportShardBulkAction.java | 44 ++++++++----------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 7e73eacded1..0a9b45581e8 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -267,33 +267,27 @@ public class TransportShardBulkAction extends TransportWriteAction documentRequest = item.request(); + final Engine.Operation operation; + try { + switch (documentRequest.opType()) { + case CREATE: + case INDEX: + operation = TransportIndexAction.executeIndexRequestOnReplica(((IndexRequest) documentRequest), indexShard); + break; + case DELETE: + operation = TransportDeleteAction.executeDeleteRequestOnReplica(((DeleteRequest) documentRequest), indexShard); + break; + default: throw new IllegalStateException("Unexpected 
request operation type on replica: " + + documentRequest.opType().getLowercase()); } - } else if (item.request() instanceof DeleteRequest) { - DeleteRequest deleteRequest = (DeleteRequest) item.request(); - try { - Engine.Delete delete = TransportDeleteAction.executeDeleteRequestOnReplica(deleteRequest, indexShard); - indexShard.delete(delete); - location = locationToSync(location, delete.getTranslogLocation()); - } catch (Exception e) { - // if its not an ignore replica failure, we need to make sure to bubble up the failure - // so we will fail the shard - if (!ignoreReplicaException(e)) { - throw e; - } + location = locationToSync(location, operation.getTranslogLocation()); + } catch (Exception e) { + // if its not an ignore replica failure, we need to make sure to bubble up the failure + // so we will fail the shard + if (!ignoreReplicaException(e)) { + throw e; } - } else { - throw new IllegalStateException("Unexpected index operation: " + item.request()); } } return location; From 40b4f39f9f6fa0fa639803c3b30fa00d0619c932 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Tue, 4 Oct 2016 14:24:33 -0400 Subject: [PATCH 04/53] ensure bwc wire compatibility --- .../elasticsearch/action/DocumentRequest.java | 26 +++++++++++++------ .../action/bulk/BulkItemResponse.java | 7 ++++- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java index 50af0dc780d..ce957bb0d61 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java @@ -105,16 +105,16 @@ public interface DocumentRequest extends IndicesRequest { * Requested operation type to perform on the document */ enum OpType { - /** - * Creates the resource. Simply adds it to the index, if there is an existing - * document with the id, then it won't be removed. 
- */ - CREATE(0), /** * Index the source. If there an existing document with the id, it will * be replaced. */ - INDEX(1), + INDEX(0), + /** + * Creates the resource. Simply adds it to the index, if there is an existing + * document with the id, then it won't be removed. + */ + CREATE(1), /** Updates a document */ UPDATE(2), /** Deletes a document */ @@ -138,12 +138,22 @@ public interface DocumentRequest extends IndicesRequest { public static OpType fromId(byte id) { switch (id) { - case 0: return CREATE; - case 1: return INDEX; + case 0: return INDEX; + case 1: return CREATE; case 2: return UPDATE; case 3: return DELETE; default: throw new IllegalArgumentException("Unknown opType: [" + id + "]"); } } + + public static OpType fromString(String sOpType) { + String lowerCase = sOpType.toLowerCase(Locale.ENGLISH); + for (OpType opType : OpType.values()) { + if (opType.getLowercase().equals(lowerCase)) { + return opType; + } + } + throw new IllegalArgumentException("Unknown opType: [" + sOpType + "]"); + } } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index adeda64ee56..83e8f8b7b46 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.DocumentRequest.OpType; import org.elasticsearch.action.delete.DeleteResponse; @@ -301,7 +302,11 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - opType = OpType.fromId(in.readByte()); + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { + 
opType = OpType.fromId(in.readByte()); + } else { + opType = OpType.fromString(in.readString()); + } byte type = in.readByte(); if (type == 0) { From 57d8025010a5d0bf60a0c98279ee2719fe336005 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Thu, 6 Oct 2016 02:59:07 -0400 Subject: [PATCH 05/53] cleanup --- .../elasticsearch/action/DocumentRequest.java | 48 ++++++++++++++++++- .../action/bulk/BulkItemRequest.java | 26 +--------- .../action/bulk/BulkItemResponse.java | 6 ++- .../action/bulk/BulkRequest.java | 26 +--------- .../action/bulk/TransportShardBulkAction.java | 7 ++- 5 files changed, 60 insertions(+), 53 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java index ce957bb0d61..f4c88e159c7 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java @@ -18,9 +18,15 @@ */ package org.elasticsearch.action; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.VersionType; +import java.io.IOException; import java.util.Locale; /** @@ -125,7 +131,7 @@ public interface DocumentRequest extends IndicesRequest { OpType(int op) { this.op = (byte) op; - this.lowercase = this.toString().toLowerCase(Locale.ENGLISH); + this.lowercase = this.toString().toLowerCase(Locale.ROOT); } public byte getId() { @@ -147,7 +153,7 @@ public interface DocumentRequest extends IndicesRequest { } public static OpType fromString(String sOpType) { - String lowerCase = sOpType.toLowerCase(Locale.ENGLISH); + String lowerCase = sOpType.toLowerCase(Locale.ROOT); for (OpType opType : OpType.values()) { if 
(opType.getLowercase().equals(lowerCase)) { return opType; @@ -156,4 +162,42 @@ public interface DocumentRequest extends IndicesRequest { throw new IllegalArgumentException("Unknown opType: [" + sOpType + "]"); } } + + /** read a document write (index/delete/update) request */ + static DocumentRequest readDocumentRequest(StreamInput in) throws IOException { + byte type = in.readByte(); + final DocumentRequest documentRequest; + if (type == 0) { + IndexRequest indexRequest = new IndexRequest(); + indexRequest.readFrom(in); + documentRequest = indexRequest; + } else if (type == 1) { + DeleteRequest deleteRequest = new DeleteRequest(); + deleteRequest.readFrom(in); + documentRequest = deleteRequest; + } else if (type == 2) { + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.readFrom(in); + documentRequest = updateRequest; + } else { + throw new IllegalStateException("invalid request type [" + type+ " ]"); + } + return documentRequest; + } + + /** write a document write (index/delete/update) request*/ + static void writeDocumentRequest(StreamOutput out, DocumentRequest request) throws IOException { + if (request instanceof IndexRequest) { + out.writeByte((byte) 0); + ((IndexRequest) request).writeTo(out); + } else if (request instanceof DeleteRequest) { + out.writeByte((byte) 1); + ((DeleteRequest) request).writeTo(out); + } else if (request instanceof UpdateRequest) { + out.writeByte((byte) 2); + ((UpdateRequest) request).writeTo(out); + } else { + throw new IllegalStateException("invalid request [" + request.getClass().getSimpleName() + " ]"); + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 79503fcf9ee..079d4efe9bf 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -89,20 +89,7 @@ public class BulkItemRequest 
implements Streamable { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - byte type = in.readByte(); - if (type == 0) { - IndexRequest indexRequest = new IndexRequest(); - indexRequest.readFrom(in); - request = indexRequest; - } else if (type == 1) { - DeleteRequest deleteRequest = new DeleteRequest(); - deleteRequest.readFrom(in); - request = deleteRequest; - } else if (type == 2) { - UpdateRequest updateRequest = new UpdateRequest(); - updateRequest.readFrom(in); - request = updateRequest; - } + request = DocumentRequest.readDocumentRequest(in); if (in.readBoolean()) { primaryResponse = BulkItemResponse.readBulkItem(in); } @@ -112,16 +99,7 @@ public class BulkItemRequest implements Streamable { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - if (request instanceof IndexRequest) { - out.writeByte((byte) 0); - ((IndexRequest) request).writeTo(out); - } else if (request instanceof DeleteRequest) { - out.writeByte((byte) 1); - ((DeleteRequest) request).writeTo(out); - } else if (request instanceof UpdateRequest) { - out.writeByte((byte) 2); - ((UpdateRequest) request).writeTo(out); - } + DocumentRequest.writeDocumentRequest(out, request); out.writeOptionalStreamable(primaryResponse); out.writeBoolean(ignoreOnReplica); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 83e8f8b7b46..9f0714784bc 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -328,7 +328,11 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - out.writeByte(opType.getId()); + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { + out.writeByte(opType.getId()); + } else { + 
out.writeString(opType.getLowercase()); + } if (response == null) { out.writeByte((byte) 2); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 57a04314593..dc72407cf42 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -550,20 +550,7 @@ public class BulkRequest extends ActionRequest implements Composite waitForActiveShards = ActiveShardCount.readFrom(in); int size = in.readVInt(); for (int i = 0; i < size; i++) { - byte type = in.readByte(); - if (type == 0) { - IndexRequest request = new IndexRequest(); - request.readFrom(in); - requests.add(request); - } else if (type == 1) { - DeleteRequest request = new DeleteRequest(); - request.readFrom(in); - requests.add(request); - } else if (type == 2) { - UpdateRequest request = new UpdateRequest(); - request.readFrom(in); - requests.add(request); - } + requests.add(DocumentRequest.readDocumentRequest(in)); } refreshPolicy = RefreshPolicy.readFrom(in); timeout = new TimeValue(in); @@ -575,16 +562,7 @@ public class BulkRequest extends ActionRequest implements Composite waitForActiveShards.writeTo(out); out.writeVInt(requests.size()); for (DocumentRequest request : requests) { - if (request instanceof IndexRequest) { - out.writeByte((byte) 0); - ((IndexRequest) request).writeTo(out); - } else if (request instanceof DeleteRequest) { - out.writeByte((byte) 1); - ((DeleteRequest) request).writeTo(out); - } else if (request instanceof UpdateRequest) { - out.writeByte((byte) 2); - ((UpdateRequest) request).writeTo(out); - } + DocumentRequest.writeDocumentRequest(out, request); } refreshPolicy.writeTo(out); timeout.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 
098092ef1ed..9289fc0cab8 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -137,8 +137,11 @@ public class TransportShardBulkAction extends TransportWriteAction writeResult = innerExecuteBulkItemRequest(metaData, indexShard, request, requestIndex); - if (writeResult.getResponse().getResult() != DocWriteResponse.Result.NOOP) { + if (writeResult.getLocation() != null) { location = locationToSync(location, writeResult.getLocation()); + } else { + assert writeResult.getResponse().getResult() == DocWriteResponse.Result.NOOP + : "only noop operation can have null next operation"; } // update the bulk item request because update request execution can mutate the bulk item request BulkItemRequest item = request.items()[requestIndex]; @@ -157,7 +160,7 @@ public class TransportShardBulkAction extends TransportWriteAction documentRequest = item.request(); - if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) { + if (isConflictException(e)) { logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", request.shardId(), documentRequest.opType().getLowercase(), request), e); } else { From eee0d18f94108b5ece7b18a450bbcc5c729d9311 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Thu, 6 Oct 2016 04:26:32 -0400 Subject: [PATCH 06/53] Make update a replication action Currently, update action delegates to index and delete actions for replication using a dedicated transport action. This change makes update a replication operation, removing the dedicated transport action. This simplifies bulk execution and removes duplicate logic for update retries and translation. This consolidates the interface for single document write requests. Now on the primary, the update request is translated to an index or delete request before execution and the translated request is sent to copies for replication. 
--- .../elasticsearch/action/DocumentRequest.java | 61 ++-- .../action/bulk/BulkItemRequest.java | 3 - .../action/bulk/BulkRequest.java | 4 +- .../action/bulk/TransportBulkAction.java | 10 +- .../action/bulk/TransportShardBulkAction.java | 173 +++++---- .../action/delete/DeleteRequest.java | 2 +- .../action/delete/TransportDeleteAction.java | 26 +- .../action/index/IndexRequest.java | 2 +- .../action/index/TransportIndexAction.java | 12 +- .../replication/ReplicationOperation.java | 36 +- .../TransportReplicationAction.java | 16 +- .../replication/TransportWriteAction.java | 36 +- .../InstanceShardOperationRequest.java | 138 -------- .../InstanceShardOperationRequestBuilder.java | 60 ---- ...ransportInstanceSingleOperationAction.java | 270 --------------- .../action/update/TransportUpdateAction.java | 297 +++++++--------- .../action/update/UpdateHelper.java | 14 +- .../action/update/UpdateReplicaRequest.java | 113 ++++++ .../action/update/UpdateRequest.java | 43 +-- .../action/update/UpdateRequestBuilder.java | 4 +- .../elasticsearch/indices/IndicesModule.java | 2 - .../action/IndicesRequestIT.java | 9 +- .../TransportWriteActionTests.java | 11 +- ...ortInstanceSingleOperationActionTests.java | 327 ------------------ .../action/update/UpdateRequestTests.java | 3 +- .../ESIndexLevelReplicationTestCase.java | 4 +- docs/reference/docs/update.asciidoc | 4 +- 27 files changed, 460 insertions(+), 1220 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java delete mode 100644 core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java delete mode 100644 core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java create mode 100644 core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java delete mode 100644 
core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java diff --git a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java index f4c88e159c7..ef2aa815a6b 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java @@ -20,10 +20,11 @@ package org.elasticsearch.action; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.update.UpdateReplicaRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.index.VersionType; import java.io.IOException; @@ -33,84 +34,72 @@ import java.util.Locale; * Generic interface to group ActionRequest, which perform writes to a single document * Action requests implementing this can be part of {@link org.elasticsearch.action.bulk.BulkRequest} */ -public interface DocumentRequest extends IndicesRequest { - - /** - * Get the index that this request operates on - * @return the index - */ - String index(); +public abstract class DocumentRequest> extends ReplicatedWriteRequest { /** * Get the type that this request operates on * @return the type */ - String type(); + public abstract String type(); /** * Get the id of the document for this request * @return the id */ - String id(); - - /** - * Get the options for this request - * @return the indices options - */ - IndicesOptions indicesOptions(); + public abstract String id(); /** * Set the routing for this request * @return the Request */ - T routing(String routing); + public abstract T routing(String routing); /** 
* Get the routing for this request * @return the Routing */ - String routing(); + public abstract String routing(); /** * Get the parent for this request * @return the Parent */ - String parent(); + public abstract String parent(); /** * Get the document version for this request * @return the document version */ - long version(); + public abstract long version(); /** * Sets the version, which will perform the operation only if a matching * version exists and no changes happened on the doc since then. */ - T version(long version); + public abstract T version(long version); /** * Get the document version type for this request * @return the document version type */ - VersionType versionType(); + public abstract VersionType versionType(); /** * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}. */ - T versionType(VersionType versionType); + public abstract T versionType(VersionType versionType); /** * Get the requested document operation type of the request * @return the operation type {@link OpType} */ - OpType opType(); + public abstract OpType opType(); /** * Requested operation type to perform on the document */ - enum OpType { + public enum OpType { /** * Index the source. If there an existing document with the id, it will * be replaced. 
@@ -164,40 +153,42 @@ public interface DocumentRequest extends IndicesRequest { } /** read a document write (index/delete/update) request */ - static DocumentRequest readDocumentRequest(StreamInput in) throws IOException { + public static DocumentRequest readDocumentRequest(StreamInput in) throws IOException { byte type = in.readByte(); - final DocumentRequest documentRequest; if (type == 0) { IndexRequest indexRequest = new IndexRequest(); indexRequest.readFrom(in); - documentRequest = indexRequest; + return indexRequest; } else if (type == 1) { DeleteRequest deleteRequest = new DeleteRequest(); deleteRequest.readFrom(in); - documentRequest = deleteRequest; + return deleteRequest; } else if (type == 2) { UpdateRequest updateRequest = new UpdateRequest(); updateRequest.readFrom(in); - documentRequest = updateRequest; + return updateRequest; + } else if (type == 3) { + UpdateReplicaRequest updateReplicaRequest = new UpdateReplicaRequest(); + updateReplicaRequest.readFrom(in); + return updateReplicaRequest; } else { throw new IllegalStateException("invalid request type [" + type+ " ]"); } - return documentRequest; } /** write a document write (index/delete/update) request*/ - static void writeDocumentRequest(StreamOutput out, DocumentRequest request) throws IOException { + public static void writeDocumentRequest(StreamOutput out, DocumentRequest request) throws IOException { if (request instanceof IndexRequest) { out.writeByte((byte) 0); - ((IndexRequest) request).writeTo(out); } else if (request instanceof DeleteRequest) { out.writeByte((byte) 1); - ((DeleteRequest) request).writeTo(out); } else if (request instanceof UpdateRequest) { out.writeByte((byte) 2); - ((UpdateRequest) request).writeTo(out); + } else if (request instanceof UpdateReplicaRequest) { + out.writeByte((byte) 3); } else { throw new IllegalStateException("invalid request [" + request.getClass().getSimpleName() + " ]"); } + request.writeTo(out); } } diff --git 
a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 079d4efe9bf..df9fd13b034 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -20,9 +20,6 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.DocumentRequest; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index dc72407cf42..7729c737439 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -528,11 +528,11 @@ public class BulkRequest extends ActionRequest implements Composite } for (DocumentRequest request : requests) { // We first check if refresh has been set - if (((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { + if (request.getRefreshPolicy() != RefreshPolicy.NONE) { validationException = addValidationError( "RefreshPolicy is not supported on an item request. 
Set it on the BulkRequest instead.", validationException); } - ActionRequestValidationException ex = ((WriteRequest) request).validate(); + ActionRequestValidationException ex = request.validate(); if (ex != null) { if (validationException == null) { validationException = new ActionRequestValidationException(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index f7861d1e093..37c1b7c2290 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -27,14 +27,12 @@ import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; -import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.update.TransportUpdateAction; -import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -211,7 +209,7 @@ public class TransportBulkAction extends HandledTransportAction documentRequest = bulkRequest.requests.get(i); + DocumentRequest documentRequest = bulkRequest.requests.get(i); //the request can only be null because we set it to null in the previous step, so it gets ignored if (documentRequest == 
null) { continue; @@ -234,10 +232,8 @@ public class TransportBulkAction extends HandledTransportAction { +public class TransportShardBulkAction extends TransportWriteAction { public static final String ACTION_NAME = BulkAction.NAME + "[s]"; - private final UpdateHelper updateHelper; private final boolean allowIdGeneration; private final MappingUpdatedAction mappingUpdatedAction; + private final UpdateHelper updateHelper; + private final AutoCreateIndex autoCreateIndex; + private final TransportCreateIndexAction createIndexAction; @Inject public TransportShardBulkAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, - MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { + MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, ScriptService scriptService, + AutoCreateIndex autoCreateIndex, TransportCreateIndexAction createIndexAction) { super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, BulkShardRequest::new, ThreadPool.Names.BULK); - this.updateHelper = updateHelper; + indexNameExpressionResolver, BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK); this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); this.mappingUpdatedAction = mappingUpdatedAction; + this.updateHelper = new UpdateHelper(scriptService, logger); + this.autoCreateIndex = autoCreateIndex; + this.createIndexAction = createIndexAction; } @Override @@ -105,7 +122,39 @@ public class TransportShardBulkAction extends TransportWriteAction onPrimaryShard(BulkShardRequest request, IndexShard indexShard) throws Exception { + protected void doExecute(Task 
task, BulkShardRequest request, ActionListener listener) { + ClusterState state = clusterService.state(); + if (autoCreateIndex.shouldAutoCreate(request.index(), state)) { + CreateIndexRequest createIndexRequest = new CreateIndexRequest(); + createIndexRequest.index(request.index()); + createIndexRequest.cause("auto(bulk api)"); + createIndexRequest.masterNodeTimeout(request.timeout()); + createIndexAction.execute(task, createIndexRequest, new ActionListener() { + @Override + public void onResponse(CreateIndexResponse result) { + innerExecute(task, request, listener); + } + + @Override + public void onFailure(Exception e) { + if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) { + // we have the index, do it + innerExecute(task, request, listener); + } else { + listener.onFailure(e); + } + } + }); + } else { + innerExecute(task, request, listener); + } + } + + private void innerExecute(Task task, final BulkShardRequest request, final ActionListener listener) { + super.doExecute(task, request, listener); + } + @Override + protected WriteResult onPrimaryShard(BulkShardRequest request, IndexShard indexShard) throws Exception { ShardId shardId = request.shardId(); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); final IndexMetaData metaData = indexService.getIndexSettings().getIndexMetaData(); @@ -123,7 +172,7 @@ public class TransportShardBulkAction extends TransportWriteAction(response, location); + return new WriteResult<>(request, response, location); } /** Executes bulk item requests and handles request execution exceptions */ @@ -131,22 +180,39 @@ public class TransportShardBulkAction extends TransportWriteAction itemRequest = request.items()[requestIndex].request(); + preVersions[requestIndex] = itemRequest.version(); + preVersionTypes[requestIndex] = itemRequest.versionType(); + DocumentRequest.OpType opType = itemRequest.opType(); try { - WriteResult writeResult = 
innerExecuteBulkItemRequest(metaData, indexShard, - request, requestIndex); + final WriteResult writeResult; + switch (itemRequest.opType()) { + case CREATE: + case INDEX: + writeResult = TransportIndexAction.executeIndexRequestOnPrimary(((IndexRequest) itemRequest), indexShard, + mappingUpdatedAction); + break; + case UPDATE: + writeResult = TransportUpdateAction.executeUpdateRequestOnPrimary(((UpdateRequest) itemRequest), indexShard, + metaData, updateHelper, mappingUpdatedAction, allowIdGeneration); + break; + case DELETE: + writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(((DeleteRequest) itemRequest), indexShard); + break; + default: + throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); + } if (writeResult.getLocation() != null) { location = locationToSync(location, writeResult.getLocation()); } else { assert writeResult.getResponse().getResult() == DocWriteResponse.Result.NOOP : "only noop operation can have null next operation"; } - // update the bulk item request because update request execution can mutate the bulk item request - BulkItemRequest item = request.items()[requestIndex]; + // update the bulk item request with replica request (update request are changed to index or delete requests for replication) + request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), + (DocumentRequest) writeResult.getReplicaRequest()); // add the response - setResponse(item, new BulkItemResponse(item.id(), opType, writeResult.getResponse())); + setResponse(request.items()[requestIndex], new BulkItemResponse(request.items()[requestIndex].id(), opType, writeResult.getResponse())); } catch (Exception e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it if (retryPrimaryException(e)) { @@ -182,33 +248,6 @@ public class TransportShardBulkAction extends TransportWriteAction innerExecuteBulkItemRequest(IndexMetaData metaData, IndexShard indexShard, - 
BulkShardRequest request, int requestIndex) throws Exception { - DocumentRequest itemRequest = request.items()[requestIndex].request(); - switch (itemRequest.opType()) { - case CREATE: - case INDEX: - return TransportIndexAction.executeIndexRequestOnPrimary(((IndexRequest) itemRequest), indexShard, mappingUpdatedAction); - case UPDATE: - int maxAttempts = ((UpdateRequest) itemRequest).retryOnConflict(); - for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) { - try { - return shardUpdateOperation(metaData, indexShard, request, requestIndex, ((UpdateRequest) itemRequest)); - } catch (Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - if (attemptCount == maxAttempts // bubble up exception when we run out of attempts - || (cause instanceof VersionConflictEngineException) == false) { // or when exception is not a version conflict - throw e; - } - } - } - throw new IllegalStateException("version conflict exception should bubble up on last attempt"); - case DELETE: - return TransportDeleteAction.executeDeleteRequestOnPrimary(((DeleteRequest) itemRequest), indexShard); - default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); - } - } - private void setResponse(BulkItemRequest request, BulkItemResponse response) { request.setPrimaryResponse(response); if (response.isFailed()) { @@ -219,51 +258,6 @@ public class TransportShardBulkAction extends TransportWriteAction shardUpdateOperation(IndexMetaData metaData, IndexShard indexShard, - BulkShardRequest request, - int requestIndex, UpdateRequest updateRequest) - throws Exception { - // Todo: capture read version conflicts, missing documents and malformed script errors in the write result due to get request - UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard); - switch (translate.getResponseResult()) { - case CREATED: - case UPDATED: - IndexRequest indexRequest = translate.action(); - MappingMetaData mappingMd = 
metaData.mappingOrDefault(indexRequest.type()); - indexRequest.process(mappingMd, allowIdGeneration, request.index()); - WriteResult writeResult = TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); - BytesReference indexSourceAsBytes = indexRequest.source(); - IndexResponse indexResponse = writeResult.getResponse(); - UpdateResponse writeUpdateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult()); - if (updateRequest.fields() != null && updateRequest.fields().length > 0) { - Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); - writeUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); - } - // Replace the update request to the translated index request to execute on the replica. - request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest); - return new WriteResult<>(writeUpdateResponse, writeResult.getLocation()); - case DELETED: - DeleteRequest deleteRequest = translate.action(); - WriteResult deleteResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - DeleteResponse response = deleteResult.getResponse(); - UpdateResponse deleteUpdateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); - deleteUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), translate.updatedSourceAsMap(), translate.updateSourceContentType(), null)); - // Replace the update request to the translated delete request to execute on the replica. 
- request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest); - return new WriteResult<>(deleteUpdateResponse, deleteResult.getLocation()); - case NOOP: - BulkItemRequest item = request.items()[requestIndex]; - indexShard.noopUpdate(updateRequest.type()); - item.setIgnoreOnReplica(); // no need to go to the replica - return new WriteResult<>(translate.action(), null); - default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); - } - } - @Override protected Location onReplicaShard(BulkShardRequest request, IndexShard indexShard) { Translog.Location location = null; @@ -272,7 +266,8 @@ public class TransportShardBulkAction extends TransportWriteAction documentRequest = item.request(); + DocumentRequest documentRequest = (item.request() instanceof UpdateReplicaRequest) + ? ((UpdateReplicaRequest) item.request()).getRequest() : item.request(); final Engine.Operation operation; try { switch (documentRequest.opType()) { diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index e3babcfc380..f2e5e13494d 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -43,7 +43,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @see org.elasticsearch.client.Client#delete(DeleteRequest) * @see org.elasticsearch.client.Requests#deleteRequest(String) */ -public class DeleteRequest extends ReplicatedWriteRequest implements DocumentRequest { +public class DeleteRequest extends DocumentRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 6f3d27ea369..926700e327e 100644 --- 
a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.delete; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -49,7 +50,7 @@ import org.elasticsearch.transport.TransportService; /** * Performs the delete operation. */ -public class TransportDeleteAction extends TransportWriteAction { +public class TransportDeleteAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; @@ -61,7 +62,7 @@ public class TransportDeleteAction extends TransportWriteAction listener) { ClusterState state = clusterService.state(); if (autoCreateIndex.shouldAutoCreate(request.index(), state)) { - createIndexAction.execute(task, new CreateIndexRequest().index(request.index()).cause("auto(delete api)").masterNodeTimeout(request.timeout()), new ActionListener() { + CreateIndexRequest createIndexRequest = new CreateIndexRequest(); + createIndexRequest.index(request.index()); + createIndexRequest.cause("auto(delete api)"); + createIndexRequest.masterNodeTimeout(request.timeout()); + createIndexAction.execute(task, createIndexRequest, new ActionListener() { @Override public void onResponse(CreateIndexResponse result) { innerExecute(task, request, listener); @@ -100,15 +105,6 @@ public class TransportDeleteAction extends TransportWriteAction listener) { super.doExecute(task, request, listener); } @@ -119,7 +115,7 @@ public class TransportDeleteAction extends TransportWriteAction onPrimaryShard(DeleteRequest request, IndexShard indexShard) { + protected 
WriteResult onPrimaryShard(DeleteRequest request, IndexShard indexShard) { return executeDeleteRequestOnPrimary(request, indexShard); } @@ -128,7 +124,7 @@ public class TransportDeleteAction extends TransportWriteAction executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) { + public static WriteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) { Engine.Delete delete = indexShard.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType()); indexShard.delete(delete); // update the request with the version so it will go to the replicas @@ -137,7 +133,7 @@ public class TransportDeleteAction extends TransportWriteAction(response, delete.getTranslogLocation()); + return new WriteResult<>(request, response, delete.getTranslogLocation()); } public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, IndexShard indexShard) { diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index cce0f6c8eef..48eaab2b48c 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -67,7 +67,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @see org.elasticsearch.client.Requests#indexRequest(String) * @see org.elasticsearch.client.Client#index(IndexRequest) */ -public class IndexRequest extends ReplicatedWriteRequest implements DocumentRequest { +public class IndexRequest extends DocumentRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index cc3fbb7906d..37cc2d7e3bc 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -60,7 +60,7 @@ import org.elasticsearch.transport.TransportService; *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * */ -public class TransportIndexAction extends TransportWriteAction { +public class TransportIndexAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; @@ -76,7 +76,7 @@ public class TransportIndexAction extends TransportWriteAction onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception { + protected WriteResult onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception { return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); } @@ -174,7 +174,7 @@ public class TransportIndexAction extends TransportWriteAction executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, + public static WriteResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception { Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard); Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); @@ -198,7 +198,7 @@ public class TransportIndexAction extends TransportWriteAction(response, operation.getTranslogLocation()); + return new WriteResult<>(request, response, operation.getTranslogLocation()); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index d541ef6a35c..8aa0ed66a77 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import 
org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -112,22 +113,24 @@ public class ReplicationOperation< pendingActions.incrementAndGet(); primaryResult = primary.perform(request); final ReplicaRequest replicaRequest = primaryResult.replicaRequest(); - assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; - if (logger.isTraceEnabled()) { - logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); + if (replicaRequest != null) { + assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; + if (logger.isTraceEnabled()) { + logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); + } + + // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. + // we have to make sure that every operation indexed into the primary after recovery start will also be replicated + // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. + ClusterState clusterState = clusterStateSupplier.get(); + final List shards = getShards(primaryId, clusterState); + Set inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState); + + markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards); + + performOnReplicas(replicaRequest, shards); } - // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. - // we have to make sure that every operation indexed into the primary after recovery start will also be replicated - // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. 
- ClusterState clusterState = clusterStateSupplier.get(); - final List shards = getShards(primaryId, clusterState); - Set inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState); - - markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards); - - performOnReplicas(replicaRequest, shards); - successfulShards.incrementAndGet(); decPendingAndFinishIfNeeded(); } @@ -419,7 +422,10 @@ public class ReplicationOperation< public interface PrimaryResult> { - R replicaRequest(); + /** + * @return null if no operation needs to be sent to a replica + */ + @Nullable R replicaRequest(); void setShardInfo(ReplicationResponse.ShardInfo shardInfo); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 9587b4e6b2c..95e196672d4 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -23,6 +23,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; @@ -163,6 +165,16 @@ public abstract class TransportReplicationAction< } } + /** helper to verify and resolve request routing */ + public static void resolveAndValidateRouting(final MetaData metaData, final String concreteIndex, + DocumentRequest request) { + request.routing(metaData.resolveIndexRouting(request.parent(), request.routing(), 
request.index())); + // check if routing is required, if so, throw error if routing wasn't specified + if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) { + throw new RoutingMissingException(concreteIndex, request.type(), request.id()); + } + } + /** * Primary operation on node with primary copy. * @@ -900,7 +912,9 @@ public abstract class TransportReplicationAction< @Override public PrimaryResult perform(Request request) throws Exception { PrimaryResult result = shardOperationOnPrimary(request); - result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); + if (result.replicaRequest() != null) { + result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); + } return result; } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index bf2b3235b11..ee8ee4862f9 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -49,38 +49,40 @@ import java.util.function.Supplier; */ public abstract class TransportWriteAction< Request extends ReplicatedWriteRequest, + ReplicaRequest extends ReplicatedWriteRequest, Response extends ReplicationResponse & WriteResponse - > extends TransportReplicationAction { + > extends TransportReplicationAction { protected TransportWriteAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, + Supplier replicaRequest, String executor) { super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - 
indexNameExpressionResolver, request, request, executor); + indexNameExpressionResolver, request, replicaRequest, executor); } /** * Called on the primary with a reference to the {@linkplain IndexShard} to modify. */ - protected abstract WriteResult onPrimaryShard(Request request, IndexShard indexShard) throws Exception; + protected abstract WriteResult onPrimaryShard(Request request, IndexShard indexShard) throws Exception; /** * Called once per replica with a reference to the {@linkplain IndexShard} to modify. * * @return the translog location of the {@linkplain IndexShard} after the write was completed or null if no write occurred */ - protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); + protected abstract Translog.Location onReplicaShard(ReplicaRequest request, IndexShard indexShard); @Override protected final WritePrimaryResult shardOperationOnPrimary(Request request) throws Exception { IndexShard indexShard = indexShard(request); - WriteResult result = onPrimaryShard(request, indexShard); - return new WritePrimaryResult(request, result.getResponse(), result.getLocation(), indexShard); + WriteResult result = onPrimaryShard(request, indexShard); + return new WritePrimaryResult(request, result, indexShard); } @Override - protected final WriteReplicaResult shardOperationOnReplica(Request request) { + protected final WriteReplicaResult shardOperationOnReplica(ReplicaRequest request) { IndexShard indexShard = indexShard(request); Translog.Location location = onReplicaShard(request, indexShard); return new WriteReplicaResult(indexShard, request, location); @@ -89,7 +91,7 @@ public abstract class TransportWriteAction< /** * Fetch the IndexShard for the request. Protected so it can be mocked in tests. 
*/ - protected IndexShard indexShard(Request request) { + protected IndexShard indexShard(ReplicatedWriteRequest request) { final ShardId shardId = request.shardId(); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); return indexService.getShard(shardId.id()); @@ -98,11 +100,13 @@ public abstract class TransportWriteAction< /** * Simple result from a write action. Write actions have static method to return these so they can integrate with bulk. */ - public static class WriteResult { + public static class WriteResult, Response extends ReplicationResponse> { + private final ReplicaRequest replicaRequest; private final Response response; private final Translog.Location location; - public WriteResult(Response response, @Nullable Location location) { + public WriteResult(ReplicaRequest replicaRequest, Response response, @Nullable Location location) { + this.replicaRequest = replicaRequest; this.response = response; this.location = location; } @@ -114,6 +118,10 @@ public abstract class TransportWriteAction< public Translog.Location getLocation() { return location; } + + public ReplicaRequest getReplicaRequest() { + return replicaRequest; + } } /** @@ -123,15 +131,15 @@ public abstract class TransportWriteAction< boolean finishedAsyncActions; ActionListener listener = null; - public WritePrimaryResult(Request request, Response finalResponse, - @Nullable Translog.Location location, + public WritePrimaryResult(Request request, + WriteResult result, IndexShard indexShard) { - super(request, finalResponse); + super(result.getReplicaRequest(), result.getResponse()); /* * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the * refresh in parallel on the primary and on the replica. 
*/ - new AsyncAfterWriteAction(indexShard, request, location, this, logger).run(); + new AsyncAfterWriteAction(indexShard, request, result.getLocation(), this, logger).run(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java deleted file mode 100644 index cb9a6ab9f69..00000000000 --- a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.support.single.instance; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.ValidateActions; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.shard.ShardId; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -/** - * - */ -public abstract class InstanceShardOperationRequest> extends ActionRequest - implements IndicesRequest { - - public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); - - protected TimeValue timeout = DEFAULT_TIMEOUT; - - protected String index; - // null means its not set, allows to explicitly direct a request to a specific shard - protected ShardId shardId = null; - - private String concreteIndex; - - protected InstanceShardOperationRequest() { - } - - public InstanceShardOperationRequest(String index) { - this.index = index; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (index == null) { - validationException = ValidateActions.addValidationError("index is missing", validationException); - } - return validationException; - } - - public String index() { - return index; - } - - @Override - public String[] indices() { - return new String[]{index}; - } - - @Override - public IndicesOptions indicesOptions() { - return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); - } - - @SuppressWarnings("unchecked") - public final Request index(String index) { - this.index = index; - return (Request) this; - } - - public TimeValue timeout() { - return timeout; - } - - /** - * A timeout to wait if the index operation can't be 
performed immediately. Defaults to 1m. - */ - @SuppressWarnings("unchecked") - public final Request timeout(TimeValue timeout) { - this.timeout = timeout; - return (Request) this; - } - - /** - * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. - */ - public final Request timeout(String timeout) { - return timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout")); - } - - public String concreteIndex() { - return concreteIndex; - } - - void concreteIndex(String concreteIndex) { - this.concreteIndex = concreteIndex; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - index = in.readString(); - if (in.readBoolean()) { - shardId = ShardId.readShardId(in); - } else { - shardId = null; - } - timeout = new TimeValue(in); - concreteIndex = in.readOptionalString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(index); - out.writeOptionalStreamable(shardId); - timeout.writeTo(out); - out.writeOptionalString(concreteIndex); - } - -} - diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java deleted file mode 100644 index 13266b9151d..00000000000 --- a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.support.single.instance; - -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.unit.TimeValue; - -/** - */ -public abstract class InstanceShardOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends InstanceShardOperationRequestBuilder> - extends ActionRequestBuilder { - - protected InstanceShardOperationRequestBuilder(ElasticsearchClient client, Action action, Request request) { - super(client, action, request); - } - - @SuppressWarnings("unchecked") - public final RequestBuilder setIndex(String index) { - request.index(index); - return (RequestBuilder) this; - } - - /** - * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. - */ - @SuppressWarnings("unchecked") - public final RequestBuilder setTimeout(TimeValue timeout) { - request.timeout(timeout); - return (RequestBuilder) this; - } - - /** - * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. 
- */ - @SuppressWarnings("unchecked") - public final RequestBuilder setTimeout(String timeout) { - request.timeout(timeout); - return (RequestBuilder) this; - } -} diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java deleted file mode 100644 index 81da5ec9a86..00000000000 --- a/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.support.single.instance; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.UnavailableShardsException; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateObserver; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.node.NodeClosedException; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportService; - -import java.util.function.Supplier; - -/** - * - */ -public abstract class TransportInstanceSingleOperationAction, Response extends ActionResponse> - extends HandledTransportAction { - protected final ClusterService clusterService; - protected final TransportService transportService; - - final String executor; - final String shardActionName; - - protected TransportInstanceSingleOperationAction(Settings 
settings, String actionName, ThreadPool threadPool, - ClusterService clusterService, TransportService transportService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { - super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); - this.clusterService = clusterService; - this.transportService = transportService; - this.executor = executor(); - this.shardActionName = actionName + "[s]"; - transportService.registerRequestHandler(shardActionName, request, executor, new ShardTransportHandler()); - } - - @Override - protected void doExecute(Request request, ActionListener listener) { - new AsyncSingleAction(request, listener).start(); - } - - protected abstract String executor(); - - protected abstract void shardOperation(Request request, ActionListener listener); - - protected abstract Response newResponse(); - - protected ClusterBlockException checkGlobalBlock(ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); - } - - protected ClusterBlockException checkRequestBlock(ClusterState state, Request request) { - return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.concreteIndex()); - } - - /** - * Resolves the request. Throws an exception if the request cannot be resolved. - */ - protected abstract void resolveRequest(ClusterState state, Request request); - - protected boolean retryOnFailure(Exception e) { - return false; - } - - protected TransportRequestOptions transportOptions() { - return TransportRequestOptions.EMPTY; - } - - /** - * Should return an iterator with a single shard! 
- */ - protected abstract ShardIterator shards(ClusterState clusterState, Request request); - - class AsyncSingleAction { - - private final ActionListener listener; - private final Request request; - private volatile ClusterStateObserver observer; - private ShardIterator shardIt; - private DiscoveryNodes nodes; - - AsyncSingleAction(Request request, ActionListener listener) { - this.request = request; - this.listener = listener; - } - - public void start() { - this.observer = new ClusterStateObserver(clusterService, request.timeout(), logger, threadPool.getThreadContext()); - doStart(); - } - - protected void doStart() { - nodes = observer.observedState().nodes(); - try { - ClusterBlockException blockException = checkGlobalBlock(observer.observedState()); - if (blockException != null) { - if (blockException.retryable()) { - retry(blockException); - return; - } else { - throw blockException; - } - } - request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request).getName()); - resolveRequest(observer.observedState(), request); - blockException = checkRequestBlock(observer.observedState(), request); - if (blockException != null) { - if (blockException.retryable()) { - retry(blockException); - return; - } else { - throw blockException; - } - } - shardIt = shards(observer.observedState(), request); - } catch (Exception e) { - listener.onFailure(e); - return; - } - - // no shardIt, might be in the case between index gateway recovery and shardIt initialization - if (shardIt.size() == 0) { - retry(null); - return; - } - - // this transport only make sense with an iterator that returns a single shard routing (like primary) - assert shardIt.size() == 1; - - ShardRouting shard = shardIt.nextOrNull(); - assert shard != null; - - if (!shard.active()) { - retry(null); - return; - } - - request.shardId = shardIt.shardId(); - DiscoveryNode node = nodes.get(shard.currentNodeId()); - transportService.sendRequest(node, shardActionName, 
request, transportOptions(), new TransportResponseHandler() { - - @Override - public Response newInstance() { - return newResponse(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public void handleResponse(Response response) { - listener.onResponse(response); - } - - @Override - public void handleException(TransportException exp) { - final Throwable cause = exp.unwrapCause(); - // if we got disconnected from the node, or the node / shard is not in the right state (being closed) - if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException || - retryOnFailure(exp)) { - retry((Exception) cause); - } else { - listener.onFailure(exp); - } - } - }); - } - - void retry(@Nullable final Exception failure) { - if (observer.isTimedOut()) { - // we running as a last attempt after a timeout has happened. don't retry - Exception listenFailure = failure; - if (listenFailure == null) { - if (shardIt == null) { - listenFailure = new UnavailableShardsException(request.concreteIndex(), -1, "Timeout waiting for [{}], request: {}", request.timeout(), actionName); - } else { - listenFailure = new UnavailableShardsException(shardIt.shardId(), "[{}] shardIt, [{}] active : Timeout waiting for [{}], request: {}", shardIt.size(), shardIt.sizeActive(), request.timeout(), actionName); - } - } - listener.onFailure(listenFailure); - return; - } - - observer.waitForNextChange(new ClusterStateObserver.Listener() { - @Override - public void onNewClusterState(ClusterState state) { - doStart(); - } - - @Override - public void onClusterServiceClose() { - listener.onFailure(new NodeClosedException(nodes.getLocalNode())); - } - - @Override - public void onTimeout(TimeValue timeout) { - // just to be on the safe side, see if we can start it now? 
- doStart(); - } - }, request.timeout()); - } - } - - private class ShardTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(final Request request, final TransportChannel channel) throws Exception { - shardOperation(request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn("failed to send response for get", inner); - } - } - }); - - } - } -} diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index e5322f51d50..1f3a97a25a2 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -34,19 +34,17 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.action.support.single.instance.TransportInstanceSingleOperationAction; -import org.elasticsearch.cluster.ClusterState; +import 
org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.routing.PlainShardIterator; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -54,59 +52,52 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.Collections; import java.util.Map; import static org.elasticsearch.ExceptionsHelper.unwrapCause; /** */ -public class TransportUpdateAction extends TransportInstanceSingleOperationAction { +public class TransportUpdateAction extends TransportWriteAction { - private final TransportDeleteAction deleteAction; - private final 
TransportIndexAction indexAction; private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; private final UpdateHelper updateHelper; private final IndicesService indicesService; + private final MappingUpdatedAction mappingUpdatedAction; + private final boolean allowIdGeneration; @Inject public TransportUpdateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, - TransportIndexAction indexAction, TransportDeleteAction deleteAction, TransportCreateIndexAction createIndexAction, - UpdateHelper updateHelper, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - IndicesService indicesService, AutoCreateIndex autoCreateIndex) { - super(settings, UpdateAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpdateRequest::new); - this.indexAction = indexAction; - this.deleteAction = deleteAction; + TransportCreateIndexAction createIndexAction, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, + AutoCreateIndex autoCreateIndex, ShardStateAction shardStateAction, + MappingUpdatedAction mappingUpdatedAction, ScriptService scriptService) { + super(settings, UpdateAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, + actionFilters, indexNameExpressionResolver, UpdateRequest::new, UpdateReplicaRequest::new, ThreadPool.Names.INDEX); this.createIndexAction = createIndexAction; - this.updateHelper = updateHelper; + this.updateHelper = new UpdateHelper(scriptService, logger); this.indicesService = indicesService; this.autoCreateIndex = autoCreateIndex; + this.mappingUpdatedAction = mappingUpdatedAction; + this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); } @Override - protected String executor() { - return ThreadPool.Names.INDEX; - } - - @Override - 
protected UpdateResponse newResponse() { - return new UpdateResponse(); - } - - @Override - protected boolean retryOnFailure(Exception e) { - return TransportActions.isShardNotAvailableException(e); - } - - @Override - protected void resolveRequest(ClusterState state, UpdateRequest request) { - resolveAndValidateRouting(state.metaData(), request.concreteIndex(), request); + protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, UpdateRequest request) { + super.resolveRequest(metaData, indexMetaData, request); + resolveAndValidateRouting(metaData, indexMetaData.getIndex().getName(), request); + ShardId shardId = clusterService.operationRouting().shardId(clusterService.state(), + indexMetaData.getIndex().getName(), request.id(), request.routing()); + request.setShardId(shardId); } public static void resolveAndValidateRouting(MetaData metaData, String concreteIndex, UpdateRequest request) { @@ -118,13 +109,17 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio } @Override - protected void doExecute(final UpdateRequest request, final ActionListener listener) { + protected void doExecute(Task task, UpdateRequest request, ActionListener listener) { // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { - createIndexAction.execute(new CreateIndexRequest().index(request.index()).cause("auto(update api)").masterNodeTimeout(request.timeout()), new ActionListener() { + CreateIndexRequest createIndexRequest = new CreateIndexRequest(); + createIndexRequest.index(request.index()); + createIndexRequest.cause("auto(update api)"); + createIndexRequest.masterNodeTimeout(request.timeout()); + createIndexAction.execute(createIndexRequest, new ActionListener() { @Override public void onResponse(CreateIndexResponse result) { - innerExecute(request, listener); + innerExecute(task, request, 
listener); } @Override @@ -132,7 +127,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio if (unwrapCause(e) instanceof IndexAlreadyExistsException) { // we have the index, do it try { - innerExecute(request, listener); + innerExecute(task, request, listener); } catch (Exception inner) { inner.addSuppressed(e); listener.onFailure(inner); @@ -143,153 +138,123 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio } }); } else { - innerExecute(request, listener); + innerExecute(task, request, listener); } } - private void innerExecute(final UpdateRequest request, final ActionListener listener) { - super.doExecute(request, listener); - } - @Override - protected ShardIterator shards(ClusterState clusterState, UpdateRequest request) { - if (request.getShardId() != null) { - return clusterState.routingTable().index(request.concreteIndex()).shard(request.getShardId().getId()).primaryShardIt(); - } - ShardIterator shardIterator = clusterService.operationRouting() - .indexShards(clusterState, request.concreteIndex(), request.id(), request.routing()); - ShardRouting shard; - while ((shard = shardIterator.nextOrNull()) != null) { - if (shard.primary()) { - return new PlainShardIterator(shardIterator.shardId(), Collections.singletonList(shard)); + protected UpdateResponse newResponseInstance() { + return new UpdateResponse(); + } + + private void innerExecute(Task task, final UpdateRequest request, final ActionListener listener) { + super.doExecute(task, request, listener); + } + + @Override + protected WriteResult onPrimaryShard(UpdateRequest request, IndexShard indexShard) throws Exception { + ShardId shardId = request.shardId(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexMetaData indexMetaData = indexService.getMetaData(); + return executeUpdateRequestOnPrimary(request, indexShard, indexMetaData, updateHelper, mappingUpdatedAction, allowIdGeneration); + 
} + + public static WriteResult executeUpdateRequestOnPrimary(UpdateRequest request, + IndexShard indexShard, + IndexMetaData indexMetaData, + UpdateHelper updateHelper, + MappingUpdatedAction mappingUpdatedAction, + boolean allowIdGeneration) + throws Exception { + int maxAttempts = request.retryOnConflict(); + for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) { + try { + return shardUpdateOperation(indexMetaData, indexShard, request, updateHelper, mappingUpdatedAction, allowIdGeneration); + } catch (Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + if (attemptCount == maxAttempts // bubble up exception when we run out of attempts + || (cause instanceof VersionConflictEngineException) == false) { // or when exception is not a version conflict + throw e; + } } } - return new PlainShardIterator(shardIterator.shardId(), Collections.emptyList()); + throw new IllegalStateException("version conflict exception should bubble up on last attempt"); + } - @Override - protected void shardOperation(final UpdateRequest request, final ActionListener listener) { - shardOperation(request, listener, 0); - } - - protected void shardOperation(final UpdateRequest request, final ActionListener listener, final int retryCount) { - final ShardId shardId = request.getShardId(); - final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - final IndexShard indexShard = indexService.getShard(shardId.getId()); + private static WriteResult shardUpdateOperation(IndexMetaData indexMetaData, + IndexShard indexShard, + UpdateRequest request, + UpdateHelper updateHelper, + MappingUpdatedAction mappingUpdatedAction, + boolean allowIdGeneration) + throws Exception { final UpdateHelper.Result result = updateHelper.prepare(request, indexShard); switch (result.getResponseResult()) { case CREATED: - IndexRequest upsertRequest = result.action(); - // we fetch it from the index request so we don't generate the bytes twice, its 
already done in the index request - final BytesReference upsertSourceBytes = upsertRequest.source(); - indexAction.execute(upsertRequest, new ActionListener() { - @Override - public void onResponse(IndexResponse response) { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); - if ((request.fetchSource() != null && request.fetchSource().fetchSource()) || - (request.fields() != null && request.fields().length > 0)) { - Tuple> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true); - update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); - } else { - update.setGetResult(null); - } - update.setForcedRefresh(response.forcedRefresh()); - listener.onResponse(update); - } - - @Override - public void onFailure(Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - if (cause instanceof VersionConflictEngineException) { - if (retryCount < request.retryOnConflict()) { - logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]", - retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id()); - threadPool.executor(executor()).execute(new ActionRunnable(listener) { - @Override - protected void doRun() { - shardOperation(request, listener, retryCount + 1); - } - }); - return; - } - } - listener.onFailure(cause instanceof Exception ? 
(Exception) cause : new NotSerializableExceptionWrapper(cause)); - } - }); - break; case UPDATED: IndexRequest indexRequest = result.action(); + MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type()); + indexRequest.process(mappingMd, allowIdGeneration, indexMetaData.getIndex().getName()); + WriteResult indexResponseWriteResult = TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); + IndexResponse response = indexResponseWriteResult.getResponse(); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request final BytesReference indexSourceBytes = indexRequest.source(); - indexAction.execute(indexRequest, new ActionListener() { - @Override - public void onResponse(IndexResponse response) { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); - update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); - update.setForcedRefresh(response.forcedRefresh()); - listener.onResponse(update); + if (result.getResponseResult() == DocWriteResponse.Result.CREATED) { + if ((request.fetchSource() != null && request.fetchSource().fetchSource()) || + (request.fields() != null && request.fields().length > 0)) { + Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceBytes, true); + update.setGetResult(updateHelper.extractGetResult(request, indexMetaData.getIndex().getName(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceBytes)); + } else { + update.setGetResult(null); } - - @Override - public void 
onFailure(Exception e) { - final Throwable cause = unwrapCause(e); - if (cause instanceof VersionConflictEngineException) { - if (retryCount < request.retryOnConflict()) { - threadPool.executor(executor()).execute(new ActionRunnable(listener) { - @Override - protected void doRun() { - shardOperation(request, listener, retryCount + 1); - } - }); - return; - } - } - listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); - } - }); - break; + } else if (result.getResponseResult() == DocWriteResponse.Result.UPDATED) { + update.setGetResult(updateHelper.extractGetResult(request, indexMetaData.getIndex().getName(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); + } + update.setForcedRefresh(response.forcedRefresh()); + UpdateReplicaRequest updateReplicaRequest = new UpdateReplicaRequest(indexRequest); + updateReplicaRequest.setParentTask(request.getParentTask()); + updateReplicaRequest.setShardId(request.shardId()); + updateReplicaRequest.setRefreshPolicy(request.getRefreshPolicy()); + return new WriteResult<>(updateReplicaRequest, update, indexResponseWriteResult.getLocation()); case DELETED: DeleteRequest deleteRequest = result.action(); - deleteAction.execute(deleteRequest, new ActionListener() { - @Override - public void onResponse(DeleteResponse response) { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); - update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); - update.setForcedRefresh(response.forcedRefresh()); - listener.onResponse(update); - } - - @Override - public void onFailure(Exception e) { - final Throwable cause = unwrapCause(e); - if (cause instanceof VersionConflictEngineException) { - if 
(retryCount < request.retryOnConflict()) { - threadPool.executor(executor()).execute(new ActionRunnable(listener) { - @Override - protected void doRun() { - shardOperation(request, listener, retryCount + 1); - } - }); - return; - } - } - listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); - } - }); - break; + WriteResult deleteResponseWriteResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); + DeleteResponse deleteResponse = deleteResponseWriteResult.getResponse(); + UpdateResponse deleteUpdate = new UpdateResponse(deleteResponse.getShardInfo(), deleteResponse.getShardId(), deleteResponse.getType(), deleteResponse.getId(), deleteResponse.getVersion(), deleteResponse.getResult()); + deleteUpdate.setGetResult(updateHelper.extractGetResult(request, indexMetaData.getIndex().getName(), deleteResponse.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); + deleteUpdate.setForcedRefresh(deleteResponse.forcedRefresh()); + UpdateReplicaRequest deleteReplicaRequest = new UpdateReplicaRequest(deleteRequest); + deleteReplicaRequest.setParentTask(request.getParentTask()); + deleteReplicaRequest.setShardId(request.shardId()); + deleteReplicaRequest.setRefreshPolicy(request.getRefreshPolicy()); + return new WriteResult<>(deleteReplicaRequest, deleteUpdate, deleteResponseWriteResult.getLocation()); case NOOP: - UpdateResponse update = result.action(); - IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex()); - if (indexServiceOrNull != null) { - IndexShard shard = indexService.getShardOrNull(shardId.getId()); - if (shard != null) { - shard.noopUpdate(request.type()); - } - } - listener.onResponse(update); - break; + UpdateResponse noopUpdate = result.action(); + indexShard.noopUpdate(request.type()); + return new WriteResult<>(null, noopUpdate, null); default: throw new IllegalStateException("Illegal result " + 
result.getResponseResult()); } } + + @Override + protected Translog.Location onReplicaShard(UpdateReplicaRequest request, IndexShard indexShard) { + assert request.getRequest() != null; + final Translog.Location location; + switch (request.getRequest().opType()) { + case INDEX: + case CREATE: + location = TransportIndexAction.executeIndexRequestOnReplica(((IndexRequest) request.getRequest()), indexShard).getTranslogLocation(); + break; + case DELETE: + location = TransportDeleteAction.executeDeleteRequestOnReplica(((DeleteRequest) request.getRequest()), indexShard).getTranslogLocation(); + break; + default: + throw new IllegalStateException("unexpected opType [" + request.getRequest().opType().getLowercase() + "]"); + + } + return location; + } } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 49206470532..c242f885f06 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.update; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -27,11 +28,8 @@ import org.elasticsearch.client.Requests; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; @@ 
-63,14 +61,14 @@ import java.util.Map; /** * Helper for translating an update request to an index, delete request or update response. */ -public class UpdateHelper extends AbstractComponent { +public class UpdateHelper { private final ScriptService scriptService; + private final Logger logger; - @Inject - public UpdateHelper(Settings settings, ScriptService scriptService) { - super(settings); + public UpdateHelper(ScriptService scriptService, Logger logger) { this.scriptService = scriptService; + this.logger = logger; } /** @@ -259,7 +257,7 @@ public class UpdateHelper extends AbstractComponent { return ctx; } - private TimeValue getTTLFromScriptContext(Map ctx) { + private static TimeValue getTTLFromScriptContext(Map ctx) { Object fetchedTTL = ctx.get("_ttl"); if (fetchedTTL != null) { if (fetchedTTL instanceof Number) { diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java new file mode 100644 index 00000000000..5f258a675c2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.update; + +import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.VersionType; + +import java.io.IOException; + +/** Replica request for update operation holds translated (index/delete) requests */ +public class UpdateReplicaRequest extends DocumentRequest { + private DocumentRequest request; + + public UpdateReplicaRequest() { + } + + public UpdateReplicaRequest(DocumentRequest request) { + assert !(request instanceof UpdateReplicaRequest) : "underlying request must not be a update replica request"; + this.request = request; + this.index = request.index(); + setRefreshPolicy(request.getRefreshPolicy()); + setShardId(request.shardId()); + setParentTask(request.getParentTask()); + } + + public DocumentRequest getRequest() { + return request; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + request = DocumentRequest.readDocumentRequest(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + DocumentRequest.writeDocumentRequest(out, request); + } + + @Override + public String type() { + return request.type(); + } + + @Override + public String id() { + return request.id(); + } + + @Override + public UpdateReplicaRequest routing(String routing) { + throw new UnsupportedOperationException("setting routing is not supported"); + } + + @Override + public String routing() { + return request.routing(); + } + + @Override + public String parent() { + return request.parent(); + } + + @Override + public long version() { + return request.version(); + } + + @Override + public UpdateReplicaRequest version(long version) { 
+ throw new UnsupportedOperationException("setting version is not supported"); + } + + @Override + public VersionType versionType() { + return request.versionType(); + } + + @Override + public UpdateReplicaRequest versionType(VersionType versionType) { + throw new UnsupportedOperationException("setting version type is not supported"); + } + + @Override + public OpType opType() { + return request.opType(); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index deca938fa6a..80d3676e051 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -23,9 +23,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.action.support.replication.ReplicationRequest; -import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesArray; @@ -56,10 +53,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** */ -public class UpdateRequest extends InstanceShardOperationRequest - implements DocumentRequest, WriteRequest { - private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(UpdateRequest.class)); +public class UpdateRequest extends DocumentRequest { private String type; private String id; @@ -97,7 +91,7 @@ public class UpdateRequest extends InstanceShardOperationRequest } public UpdateRequest(String index, String type, String id) { - super(index); + this.index = index; this.type = type; 
this.id = id; } @@ -495,39 +489,6 @@ public class UpdateRequest extends InstanceShardOperationRequest return OpType.UPDATE; } - @Override - public UpdateRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { - this.refreshPolicy = refreshPolicy; - return this; - } - - @Override - public RefreshPolicy getRefreshPolicy() { - return refreshPolicy; - } - - public ActiveShardCount waitForActiveShards() { - return this.waitForActiveShards; - } - - /** - * Sets the number of shard copies that must be active before proceeding with the write. - * See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details. - */ - public UpdateRequest waitForActiveShards(ActiveShardCount waitForActiveShards) { - this.waitForActiveShards = waitForActiveShards; - return this; - } - - /** - * A shortcut for {@link #waitForActiveShards(ActiveShardCount)} where the numerical - * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)} - * to get the ActiveShardCount. - */ - public UpdateRequest waitForActiveShards(final int waitForActiveShards) { - return waitForActiveShards(ActiveShardCount.from(waitForActiveShards)); - } - /** * Sets the doc to use for updates when a script is not specified. 
*/ diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index bbbc9bafd8f..e9b111f4df9 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequest; -import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder; +import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.DeprecationLogger; @@ -37,7 +37,7 @@ import org.elasticsearch.script.Script; import java.util.Map; -public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder +public class UpdateRequestBuilder extends ReplicationRequestBuilder implements WriteRequestBuilder { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class)); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index eb1843dc7d9..83f347d6b98 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -22,7 +22,6 @@ package org.elasticsearch.indices; import org.elasticsearch.action.admin.indices.rollover.Condition; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; -import org.elasticsearch.action.update.UpdateHelper; import 
org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.inject.AbstractModule; @@ -182,7 +181,6 @@ public class IndicesModule extends AbstractModule { bind(SyncedFlushService.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton(); bind(IndicesTTLService.class).asEagerSingleton(); - bind(UpdateHelper.class).asEagerSingleton(); bind(MetaDataIndexUpgradeService.class).asEagerSingleton(); bind(NodeServicesProvider.class).asEagerSingleton(); } diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index d1d01610f18..5c692668f26 100644 --- a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -222,8 +222,7 @@ public class IndicesRequestIT extends ESIntegTestCase { } public void testUpdate() { - //update action goes to the primary, index op gets executed locally, then replicated - String[] updateShardActions = new String[]{UpdateAction.NAME + "[s]", IndexAction.NAME + "[r]"}; + String[] updateShardActions = new String[]{UpdateAction.NAME + "[p]", UpdateAction.NAME + "[r]"}; interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); @@ -237,8 +236,7 @@ public class IndicesRequestIT extends ESIntegTestCase { } public void testUpdateUpsert() { - //update action goes to the primary, index op gets executed locally, then replicated - String[] updateShardActions = new String[]{UpdateAction.NAME + "[s]", IndexAction.NAME + "[r]"}; + String[] updateShardActions = new String[]{UpdateAction.NAME + "[p]", UpdateAction.NAME + "[r]"}; interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); @@ -251,8 +249,7 @@ public class IndicesRequestIT extends ESIntegTestCase { } public void testUpdateDelete() { - //update 
action goes to the primary, delete op gets executed locally, then replicated - String[] updateShardActions = new String[]{UpdateAction.NAME + "[s]", DeleteAction.NAME + "[r]"}; + String[] updateShardActions = new String[]{UpdateAction.NAME + "[p]", UpdateAction.NAME + "[r]"}; interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index a554ca53d99..d2070fb21db 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteResponse; @@ -128,21 +129,21 @@ public class TransportWriteActionTests extends ESTestCase { resultChecker.accept(listener.response, forcedRefresh); } - private class TestAction extends TransportWriteAction { + private class TestAction extends TransportWriteAction { protected TestAction() { super(Settings.EMPTY, "test", new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR), null, null, null, null, new ActionFilters(new HashSet<>()), - new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, ThreadPool.Names.SAME); + new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, TestRequest::new, ThreadPool.Names.SAME); } @Override - protected IndexShard indexShard(TestRequest request) { + protected IndexShard indexShard(ReplicatedWriteRequest request) { return indexShard; } @Override - 
protected WriteResult onPrimaryShard(TestRequest request, IndexShard indexShard) throws Exception { - return new WriteResult<>(new TestResponse(), location); + protected WriteResult onPrimaryShard(TestRequest request, IndexShard indexShard) throws Exception { + return new WriteResult<>(request, new TestResponse(), location); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java deleted file mode 100644 index 1d736060568..00000000000 --- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ /dev/null @@ -1,327 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.support.single.instance; - -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.ActionFilter; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlock; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.transport.CapturingTransport; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportService; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import 
java.util.function.Supplier; - -import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; -import static org.elasticsearch.test.ClusterServiceUtils.setState; -import static org.hamcrest.core.IsEqual.equalTo; - -public class TransportInstanceSingleOperationActionTests extends ESTestCase { - - private static ThreadPool THREAD_POOL; - - private ClusterService clusterService; - private CapturingTransport transport; - private TransportService transportService; - - private TestTransportInstanceSingleOperationAction action; - - public static class Request extends InstanceShardOperationRequest { - public Request() { - } - } - - public static class Response extends ActionResponse { - public Response() { - } - } - - class TestTransportInstanceSingleOperationAction extends TransportInstanceSingleOperationAction { - private final Map shards = new HashMap<>(); - - public TestTransportInstanceSingleOperationAction(Settings settings, String actionName, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { - super(settings, actionName, THREAD_POOL, TransportInstanceSingleOperationActionTests.this.clusterService, transportService, actionFilters, indexNameExpressionResolver, request); - } - - public Map getResults() { - return shards; - } - - @Override - protected String executor() { - return ThreadPool.Names.SAME; - } - - @Override - protected void shardOperation(Request request, ActionListener listener) { - throw new UnsupportedOperationException("Not implemented in test class"); - } - - @Override - protected Response newResponse() { - return new Response(); - } - - @Override - protected void resolveRequest(ClusterState state, Request request) { - } - - @Override - protected ShardIterator shards(ClusterState clusterState, Request request) { - return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId.getId()).primaryShardIt(); - } - } - - class 
MyResolver extends IndexNameExpressionResolver { - public MyResolver() { - super(Settings.EMPTY); - } - - @Override - public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { - return request.indices(); - } - } - - @BeforeClass - public static void startThreadPool() { - THREAD_POOL = new TestThreadPool(TransportInstanceSingleOperationActionTests.class.getSimpleName()); - } - - @Before - public void setUp() throws Exception { - super.setUp(); - transport = new CapturingTransport(); - clusterService = createClusterService(THREAD_POOL); - transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR); - transportService.start(); - transportService.acceptIncomingRequests(); - action = new TestTransportInstanceSingleOperationAction( - Settings.EMPTY, - "indices:admin/test", - transportService, - new ActionFilters(new HashSet()), - new MyResolver(), - Request::new - ); - } - - @After - public void tearDown() throws Exception { - super.tearDown(); - clusterService.close(); - transportService.close(); - } - - @AfterClass - public static void destroyThreadPool() { - ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS); - // since static must set to null to be eligible for collection - THREAD_POOL = null; - } - - public void testGlobalBlock() { - Request request = new Request(); - PlainActionFuture listener = new PlainActionFuture<>(); - ClusterBlocks.Builder block = ClusterBlocks.builder() - .addGlobalBlock(new ClusterBlock(1, "", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); - setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); - try { - action.new AsyncSingleAction(request, listener).start(); - listener.get(); - fail("expected ClusterBlockException"); - } catch (Exception e) { - if (ExceptionsHelper.unwrap(e, ClusterBlockException.class) == null) { - logger.info("expected ClusterBlockException but got ", e); - 
fail("expected ClusterBlockException"); - } - } - } - - public void testBasicRequestWorks() throws InterruptedException, ExecutionException, TimeoutException { - Request request = new Request().index("test"); - request.shardId = new ShardId("test", "_na_", 0); - PlainActionFuture listener = new PlainActionFuture<>(); - setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); - action.new AsyncSingleAction(request, listener).start(); - assertThat(transport.capturedRequests().length, equalTo(1)); - transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); - listener.get(); - } - - public void testFailureWithoutRetry() throws Exception { - Request request = new Request().index("test"); - request.shardId = new ShardId("test", "_na_", 0); - PlainActionFuture listener = new PlainActionFuture<>(); - setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); - - action.new AsyncSingleAction(request, listener).start(); - assertThat(transport.capturedRequests().length, equalTo(1)); - long requestId = transport.capturedRequests()[0].requestId; - transport.clear(); - // this should not trigger retry or anything and the listener should report exception immediately - transport.handleRemoteError(requestId, new TransportException("a generic transport exception", new Exception("generic test exception"))); - - try { - // result should return immediately - assertTrue(listener.isDone()); - listener.get(); - fail("this should fail with a transport exception"); - } catch (ExecutionException t) { - if (ExceptionsHelper.unwrap(t, TransportException.class) == null) { - logger.info("expected TransportException but got ", t); - fail("expected and TransportException"); - } - } - } - - public void testSuccessAfterRetryWithClusterStateUpdate() throws Exception { - Request request = new Request().index("test"); - request.shardId = new ShardId("test", "_na_", 0); - 
PlainActionFuture listener = new PlainActionFuture<>(); - boolean local = randomBoolean(); - setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.INITIALIZING)); - action.new AsyncSingleAction(request, listener).start(); - // this should fail because primary not initialized - assertThat(transport.capturedRequests().length, equalTo(0)); - setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); - // this time it should work - assertThat(transport.capturedRequests().length, equalTo(1)); - transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); - listener.get(); - } - - public void testSuccessAfterRetryWithExceptionFromTransport() throws Exception { - Request request = new Request().index("test"); - request.shardId = new ShardId("test", "_na_", 0); - PlainActionFuture listener = new PlainActionFuture<>(); - boolean local = randomBoolean(); - setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); - action.new AsyncSingleAction(request, listener).start(); - assertThat(transport.capturedRequests().length, equalTo(1)); - long requestId = transport.capturedRequests()[0].requestId; - transport.clear(); - DiscoveryNode node = clusterService.state().getNodes().getLocalNode(); - transport.handleLocalError(requestId, new ConnectTransportException(node, "test exception")); - // trigger cluster state observer - setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); - assertThat(transport.capturedRequests().length, equalTo(1)); - transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); - listener.get(); - } - - public void testRetryOfAnAlreadyTimedOutRequest() throws Exception { - Request request = new Request().index("test").timeout(new TimeValue(0, TimeUnit.MILLISECONDS)); - request.shardId = new ShardId("test", "_na_", 0); - PlainActionFuture listener 
= new PlainActionFuture<>(); - setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); - action.new AsyncSingleAction(request, listener).start(); - assertThat(transport.capturedRequests().length, equalTo(1)); - long requestId = transport.capturedRequests()[0].requestId; - transport.clear(); - DiscoveryNode node = clusterService.state().getNodes().getLocalNode(); - transport.handleLocalError(requestId, new ConnectTransportException(node, "test exception")); - - // wait until the timeout was triggered and we actually tried to send for the second time - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(transport.capturedRequests().length, equalTo(1)); - } - }); - - // let it fail the second time too - requestId = transport.capturedRequests()[0].requestId; - transport.handleLocalError(requestId, new ConnectTransportException(node, "test exception")); - try { - // result should return immediately - assertTrue(listener.isDone()); - listener.get(); - fail("this should fail with a transport exception"); - } catch (ExecutionException t) { - if (ExceptionsHelper.unwrap(t, ConnectTransportException.class) == null) { - logger.info("expected ConnectTransportException but got ", t); - fail("expected and ConnectTransportException"); - } - } - } - - public void testUnresolvableRequestDoesNotHang() throws InterruptedException, ExecutionException, TimeoutException { - action = new TestTransportInstanceSingleOperationAction( - Settings.EMPTY, - "indices:admin/test_unresolvable", - transportService, - new ActionFilters(new HashSet<>()), - new MyResolver(), - Request::new - ) { - @Override - protected void resolveRequest(ClusterState state, Request request) { - throw new IllegalStateException("request cannot be resolved"); - } - }; - Request request = new Request().index("test"); - request.shardId = new ShardId("test", "_na_", 0); - PlainActionFuture listener = new PlainActionFuture<>(); - 
setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); - action.new AsyncSingleAction(request, listener).start(); - assertThat(transport.capturedRequests().length, equalTo(0)); - try { - listener.get(); - } catch (Exception e) { - if (ExceptionsHelper.unwrap(e, IllegalStateException.class) == null) { - logger.info("expected IllegalStateException but got ", e); - fail("expected and IllegalStateException"); - } - } - } -} diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index cb27a527f63..d4291464fdb 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -172,9 +172,8 @@ public class UpdateRequestTests extends ESTestCase { // Related to issue 3256 public void testUpdateRequestWithTTL() throws Exception { TimeValue providedTTLValue = TimeValue.parseTimeValue(randomTimeValue(), null, "ttl"); - Settings settings = settings(Version.CURRENT).build(); - UpdateHelper updateHelper = new UpdateHelper(settings, null); + UpdateHelper updateHelper = new UpdateHelper(null, logger); // We just upsert one document with ttl IndexRequest indexRequest = new IndexRequest("test", "type1", "1") diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 6e200c4756a..67f0b4bf4a6 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -366,8 +366,8 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase @Override protected PrimaryResult performOnPrimary(IndexShard primary, IndexRequest 
request) throws Exception { - TransportWriteAction.WriteResult result = TransportIndexAction.executeIndexRequestOnPrimary(request, primary, - null); + TransportWriteAction.WriteResult result = + TransportIndexAction.executeIndexRequestOnPrimary(request, primary, null); request.primaryTerm(primary.getPrimaryTerm()); TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.getLocation(), logger); return new PrimaryResult(request, result.getResponse()); diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index ff4c4c657d7..7972bc39f37 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -162,8 +162,8 @@ the request was ignored. -------------------------------------------------- { "_shards": { - "total": 0, - "successful": 0, + "total": 1, + "successful": 1, "failed": 0 }, "_index": "test", From b5079ce0092e2dfd742fbe3aed8a8f95931a378d Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Thu, 6 Oct 2016 05:05:59 -0400 Subject: [PATCH 07/53] rename DocumentRequest to DocumentWriteRequest --- .../noop/action/bulk/RestNoopBulkAction.java | 4 +- .../action/bulk/TransportNoopBulkAction.java | 4 +- ...Request.java => DocumentWriteRequest.java} | 6 +-- .../action/bulk/BulkItemRequest.java | 12 ++--- .../action/bulk/BulkItemResponse.java | 2 +- .../action/bulk/BulkProcessor.java | 12 ++--- .../action/bulk/BulkRequest.java | 28 +++++------ .../action/bulk/TransportBulkAction.java | 45 +++++++++--------- .../action/bulk/TransportShardBulkAction.java | 46 +++++++------------ .../action/delete/DeleteRequest.java | 5 +- .../action/delete/TransportDeleteAction.java | 2 - .../action/index/IndexRequest.java | 5 +- .../action/index/IndexRequestBuilder.java | 4 +- .../action/ingest/IngestActionFilter.java | 8 ++-- .../TransportReplicationAction.java | 4 +- .../action/update/UpdateReplicaRequest.java | 17 +++---- .../action/update/UpdateRequest.java | 6 +-- 
.../org/elasticsearch/index/mapper/Uid.java | 2 - .../ingest/PipelineExecutionService.java | 6 +-- .../action/bulk/BulkRequestTests.java | 4 +- .../action/bulk/BulkWithUpdatesIT.java | 2 +- .../elasticsearch/action/bulk/RetryTests.java | 6 +-- .../action/index/IndexRequestTests.java | 10 ++-- .../ingest/BulkRequestModifierTests.java | 4 +- .../ingest/IngestActionFilterTests.java | 6 +-- .../document/DocumentActionsIT.java | 2 +- .../ingest/PipelineExecutionServiceTests.java | 4 +- .../routing/SimpleRoutingIT.java | 9 ++-- .../versioning/SimpleVersioningIT.java | 7 ++- .../AbstractAsyncBulkIndexByScrollAction.java | 6 +-- .../reindex/AsyncBulkByScrollActionTests.java | 17 ++++--- .../index/reindex/ReindexFailureTests.java | 2 +- .../index/reindex/ReindexVersioningTests.java | 2 +- 33 files changed, 134 insertions(+), 165 deletions(-) rename core/src/main/java/org/elasticsearch/action/{DocumentRequest.java => DocumentWriteRequest.java} (95%) diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java index 466821824a5..3add7b21c23 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.plugin.noop.action.bulk; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; @@ -85,7 +85,7 @@ public class RestNoopBulkAction extends BaseRestHandler { } private static class 
BulkRestBuilderListener extends RestBuilderListener { - private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentRequest.OpType.UPDATE, + private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentWriteRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); private final RestRequest request; diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java index 931e6724462..238508cf5df 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.plugin.noop.action.bulk; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -35,7 +35,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class TransportNoopBulkAction extends HandledTransportAction { - private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentRequest.OpType.UPDATE, + private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentWriteRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); @Inject diff --git 
a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java similarity index 95% rename from core/src/main/java/org/elasticsearch/action/DocumentRequest.java rename to core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java index ef2aa815a6b..66ea6401bcc 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java @@ -34,7 +34,7 @@ import java.util.Locale; * Generic interface to group ActionRequest, which perform writes to a single document * Action requests implementing this can be part of {@link org.elasticsearch.action.bulk.BulkRequest} */ -public abstract class DocumentRequest> extends ReplicatedWriteRequest { +public abstract class DocumentWriteRequest> extends ReplicatedWriteRequest { /** * Get the type that this request operates on @@ -153,7 +153,7 @@ public abstract class DocumentRequest> exten } /** read a document write (index/delete/update) request */ - public static DocumentRequest readDocumentRequest(StreamInput in) throws IOException { + public static DocumentWriteRequest readDocumentRequest(StreamInput in) throws IOException { byte type = in.readByte(); if (type == 0) { IndexRequest indexRequest = new IndexRequest(); @@ -177,7 +177,7 @@ public abstract class DocumentRequest> exten } /** write a document write (index/delete/update) request*/ - public static void writeDocumentRequest(StreamOutput out, DocumentRequest request) throws IOException { + public static void writeDocumentRequest(StreamOutput out, DocumentWriteRequest request) throws IOException { if (request instanceof IndexRequest) { out.writeByte((byte) 0); } else if (request instanceof DeleteRequest) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index df9fd13b034..7ba3c81b5df 100644 --- 
a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -32,7 +32,7 @@ import java.io.IOException; public class BulkItemRequest implements Streamable { private int id; - private DocumentRequest request; + private DocumentWriteRequest request; private volatile BulkItemResponse primaryResponse; private volatile boolean ignoreOnReplica; @@ -40,7 +40,7 @@ public class BulkItemRequest implements Streamable { } - public BulkItemRequest(int id, DocumentRequest request) { + public BulkItemRequest(int id, DocumentWriteRequest request) { this.id = id; this.request = request; } @@ -49,7 +49,7 @@ public class BulkItemRequest implements Streamable { return id; } - public DocumentRequest request() { + public DocumentWriteRequest request() { return request; } @@ -86,7 +86,7 @@ public class BulkItemRequest implements Streamable { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - request = DocumentRequest.readDocumentRequest(in); + request = DocumentWriteRequest.readDocumentRequest(in); if (in.readBoolean()) { primaryResponse = BulkItemResponse.readBulkItem(in); } @@ -96,7 +96,7 @@ public class BulkItemRequest implements Streamable { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - DocumentRequest.writeDocumentRequest(out, request); + DocumentWriteRequest.writeDocumentRequest(out, request); out.writeOptionalStreamable(primaryResponse); out.writeBoolean(ignoreOnReplica); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java 
b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 9f0714784bc..9129f9b01bf 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -23,7 +23,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest.OpType; +import org.elasticsearch.action.DocumentWriteRequest.OpType; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateResponse; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index f32bfaa775c..ea07136a8c9 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Client; @@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable { * (for example, if no id is provided, one will be generated, or usage of the create flag). */ public BulkProcessor add(IndexRequest request) { - return add((DocumentRequest) request); + return add((DocumentWriteRequest) request); } /** * Adds an {@link DeleteRequest} to the list of actions to execute. */ public BulkProcessor add(DeleteRequest request) { - return add((DocumentRequest) request); + return add((DocumentWriteRequest) request); } /** * Adds either a delete or an index request. 
*/ - public BulkProcessor add(DocumentRequest request) { + public BulkProcessor add(DocumentWriteRequest request) { return add(request, null); } - public BulkProcessor add(DocumentRequest request, @Nullable Object payload) { + public BulkProcessor add(DocumentWriteRequest request, @Nullable Object payload) { internalAdd(request, payload); return this; } @@ -282,7 +282,7 @@ public class BulkProcessor implements Closeable { } } - private synchronized void internalAdd(DocumentRequest request, @Nullable Object payload) { + private synchronized void internalAdd(DocumentWriteRequest request, @Nullable Object payload) { ensureOpen(); bulkRequest.add(request, payload); executeIfNeeded(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 7729c737439..2ec89d55228 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -72,7 +72,7 @@ public class BulkRequest extends ActionRequest implements Composite * {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare * the one with the least casts. 
*/ - final List> requests = new ArrayList<>(); + final List> requests = new ArrayList<>(); List payloads = null; protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; @@ -87,14 +87,14 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(DocumentRequest... requests) { - for (DocumentRequest request : requests) { + public BulkRequest add(DocumentWriteRequest... requests) { + for (DocumentWriteRequest request : requests) { add(request, null); } return this; } - public BulkRequest add(DocumentRequest request) { + public BulkRequest add(DocumentWriteRequest request) { return add(request, null); } @@ -104,7 +104,7 @@ public class BulkRequest extends ActionRequest implements Composite * @param payload Optional payload * @return the current bulk request */ - public BulkRequest add(DocumentRequest request, @Nullable Object payload) { + public BulkRequest add(DocumentWriteRequest request, @Nullable Object payload) { if (request instanceof IndexRequest) { add((IndexRequest) request, payload); } else if (request instanceof DeleteRequest) { @@ -120,8 +120,8 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(Iterable> requests) { - for (DocumentRequest request : requests) { + public BulkRequest add(Iterable> requests) { + for (DocumentWriteRequest request : requests) { add(request); } return this; @@ -207,7 +207,7 @@ public class BulkRequest extends ActionRequest implements Composite /** * The list of requests in this bulk request. */ - public List> requests() { + public List> requests() { return this.requests; } @@ -508,7 +508,7 @@ public class BulkRequest extends ActionRequest implements Composite * @return Whether this bulk request contains index request with an ingest pipeline enabled. 
*/ public boolean hasIndexRequestsWithPipelines() { - for (DocumentRequest actionRequest : requests) { + for (DocumentWriteRequest actionRequest : requests) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { @@ -526,7 +526,7 @@ public class BulkRequest extends ActionRequest implements Composite if (requests.isEmpty()) { validationException = addValidationError("no requests added", validationException); } - for (DocumentRequest request : requests) { + for (DocumentWriteRequest request : requests) { // We first check if refresh has been set if (request.getRefreshPolicy() != RefreshPolicy.NONE) { validationException = addValidationError( @@ -550,7 +550,7 @@ public class BulkRequest extends ActionRequest implements Composite waitForActiveShards = ActiveShardCount.readFrom(in); int size = in.readVInt(); for (int i = 0; i < size; i++) { - requests.add(DocumentRequest.readDocumentRequest(in)); + requests.add(DocumentWriteRequest.readDocumentRequest(in)); } refreshPolicy = RefreshPolicy.readFrom(in); timeout = new TimeValue(in); @@ -561,8 +561,8 @@ public class BulkRequest extends ActionRequest implements Composite super.writeTo(out); waitForActiveShards.writeTo(out); out.writeVInt(requests.size()); - for (DocumentRequest request : requests) { - DocumentRequest.writeDocumentRequest(out, request); + for (DocumentWriteRequest request : requests) { + DocumentWriteRequest.writeDocumentRequest(out, request); } refreshPolicy.writeTo(out); timeout.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 37c1b7c2290..f7ea0033b5b 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -22,12 +22,11 @@ package org.elasticsearch.action.bulk; 
import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; -import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; @@ -116,7 +115,7 @@ public class TransportBulkAction extends HandledTransportAction autoCreateIndices = bulkRequest.requests.stream() - .map(DocumentRequest::index) + .map(DocumentWriteRequest::index) .collect(Collectors.toSet()); final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size()); ClusterState state = clusterService.state(); @@ -143,7 +142,7 @@ public class TransportBulkAction extends HandledTransportAction request = bulkRequest.requests.get(i); + DocumentWriteRequest request = bulkRequest.requests.get(i); if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) { bulkRequest.requests.set(i, null); } @@ -178,7 +177,7 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, DocumentRequest request, String index, Exception e) { + private boolean setResponseFailureIfIndexMatches(AtomicArray responses, int idx, DocumentWriteRequest request, String index, Exception e) { if (index.equals(request.index())) { responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e))); return true; @@ -209,20 +208,20 @@ public class TransportBulkAction extends 
HandledTransportAction Operations mapping Map> requestsByShard = new HashMap<>(); for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocumentRequest request = bulkRequest.requests.get(i); + DocumentWriteRequest request = bulkRequest.requests.get(i); if (request == null) { continue; } @@ -296,9 +295,9 @@ public class TransportBulkAction extends HandledTransportAction documentRequest = request.request(); - responses.set(request.id(), new BulkItemResponse(request.id(), documentRequest.opType(), - new BulkItemResponse.Failure(indexName, documentRequest.type(), documentRequest.id(), e))); + DocumentWriteRequest documentWriteRequest = request.request(); + responses.set(request.id(), new BulkItemResponse(request.id(), documentWriteRequest.opType(), + new BulkItemResponse.Failure(indexName, documentWriteRequest.type(), documentWriteRequest.id(), e))); } if (counter.decrementAndGet() == 0) { finishHim(); @@ -312,9 +311,9 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, - final ConcreteIndices concreteIndices, - final MetaData metaData) { + private boolean addFailureIfIndexIsUnavailable(DocumentWriteRequest request, BulkRequest bulkRequest, AtomicArray responses, int idx, + final ConcreteIndices concreteIndices, + final MetaData metaData) { Index concreteIndex = concreteIndices.getConcreteIndex(request.index()); Exception unavailableException = null; if (concreteIndex == null) { @@ -358,7 +357,7 @@ public class TransportBulkAction extends HandledTransportAction itemRequest = request.items()[requestIndex].request(); + DocumentWriteRequest itemRequest = request.items()[requestIndex].request(); preVersions[requestIndex] = itemRequest.version(); preVersionTypes[requestIndex] = itemRequest.versionType(); - DocumentRequest.OpType opType = itemRequest.opType(); + DocumentWriteRequest.OpType opType = itemRequest.opType(); try { final WriteResult writeResult; switch (itemRequest.opType()) { @@ -210,7 +198,7 @@ public class 
TransportShardBulkAction extends TransportWriteAction) writeResult.getReplicaRequest()); + (DocumentWriteRequest) writeResult.getReplicaRequest()); // add the response setResponse(request.items()[requestIndex], new BulkItemResponse(request.items()[requestIndex].id(), opType, writeResult.getResponse())); } catch (Exception e) { @@ -218,20 +206,20 @@ public class TransportShardBulkAction extends TransportWriteAction documentRequest = request.items()[j].request(); - documentRequest.version(preVersions[j]); - documentRequest.versionType(preVersionTypes[j]); + DocumentWriteRequest documentWriteRequest = request.items()[j].request(); + documentWriteRequest.version(preVersions[j]); + documentWriteRequest.versionType(preVersionTypes[j]); } throw (ElasticsearchException) e; } BulkItemRequest item = request.items()[requestIndex]; - DocumentRequest documentRequest = item.request(); + DocumentWriteRequest documentWriteRequest = item.request(); if (isConflictException(e)) { logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), documentRequest.opType().getLowercase(), request), e); + request.shardId(), documentWriteRequest.opType().getLowercase(), request), e); } else { logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), documentRequest.opType().getLowercase(), request), e); + request.shardId(), documentWriteRequest.opType().getLowercase(), request), e); } // if its a conflict failure, and we already executed the request on a primary (and we execute it // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) @@ -239,8 +227,8 @@ public class TransportShardBulkAction extends TransportWriteAction { +public class DeleteRequest extends DocumentWriteRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java 
b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 926700e327e..d3cb4d24831 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -21,8 +21,6 @@ package org.elasticsearch.action.delete; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; -import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 48eaab2b48c..264b7e0a844 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -21,10 +21,9 @@ package org.elasticsearch.action.index; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.TimestampParsingException; -import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -67,7 +66,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @see org.elasticsearch.client.Requests#indexRequest(String) * @see org.elasticsearch.client.Client#index(IndexRequest) */ -public class IndexRequest extends 
DocumentRequest { +public class IndexRequest extends DocumentWriteRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index a9d8bcaa56b..3291f219a9c 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.index; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -201,7 +201,7 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder> { + static final class BulkRequestModifier implements Iterator> { final BulkRequest bulkRequest; final Set failedSlots; @@ -151,7 +151,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio } @Override - public DocumentRequest next() { + public DocumentWriteRequest next() { return bulkRequest.requests().get(++currentSlot); } @@ -172,7 +172,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio int slot = 0; originalSlots = new int[bulkRequest.requests().size() - failedSlots.size()]; for (int i = 0; i < bulkRequest.requests().size(); i++) { - DocumentRequest request = bulkRequest.requests().get(i); + DocumentWriteRequest request = bulkRequest.requests().get(i); if (failedSlots.contains(i) == false) { modifiedBulkRequest.add(request); originalSlots[slot++] = i; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 95e196672d4..cab7f1606b8 
100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -23,7 +23,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActionFilters; @@ -167,7 +167,7 @@ public abstract class TransportReplicationAction< /** helper to verify and resolve request routing */ public static void resolveAndValidateRouting(final MetaData metaData, final String concreteIndex, - DocumentRequest request) { + DocumentWriteRequest request) { request.routing(metaData.resolveIndexRouting(request.parent(), request.routing(), request.index())); // check if routing is required, if so, throw error if routing wasn't specified if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) { diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java index 5f258a675c2..1eaf3c698fd 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java @@ -19,10 +19,7 @@ package org.elasticsearch.action.update; -import org.elasticsearch.action.DocumentRequest; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import 
org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.VersionType; @@ -30,13 +27,13 @@ import org.elasticsearch.index.VersionType; import java.io.IOException; /** Replica request for update operation holds translated (index/delete) requests */ -public class UpdateReplicaRequest extends DocumentRequest { - private DocumentRequest request; +public class UpdateReplicaRequest extends DocumentWriteRequest { + private DocumentWriteRequest request; public UpdateReplicaRequest() { } - public UpdateReplicaRequest(DocumentRequest request) { + public UpdateReplicaRequest(DocumentWriteRequest request) { assert !(request instanceof UpdateReplicaRequest) : "underlying request must not be a update replica request"; this.request = request; this.index = request.index(); @@ -45,20 +42,20 @@ public class UpdateReplicaRequest extends DocumentRequest setParentTask(request.getParentTask()); } - public DocumentRequest getRequest() { + public DocumentWriteRequest getRequest() { return request; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - request = DocumentRequest.readDocumentRequest(in); + request = DocumentWriteRequest.readDocumentRequest(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - DocumentRequest.writeDocumentRequest(out, request); + DocumentWriteRequest.writeDocumentRequest(out, request); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 80d3676e051..d00f7c046f6 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.update; import 
org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.common.Nullable; @@ -29,8 +29,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -53,7 +51,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** */ -public class UpdateRequest extends DocumentRequest { +public class UpdateRequest extends DocumentWriteRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Uid.java b/core/src/main/java/org/elasticsearch/index/mapper/Uid.java index 2a8938b4ab7..344c8dc0cc0 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Uid.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Uid.java @@ -21,12 +21,10 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.common.lucene.BytesRefs; import java.util.Collection; import java.util.Collections; -import java.util.List; /** * diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 57eb7afcb5a..2a5217a9f25 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java 
+++ b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -19,7 +19,7 @@ package org.elasticsearch.ingest; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; @@ -68,7 +68,7 @@ public class PipelineExecutionService implements ClusterStateListener { }); } - public void executeBulkRequest(Iterable> actionRequests, + public void executeBulkRequest(Iterable> actionRequests, BiConsumer itemFailureHandler, Consumer completionHandler) { threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() { @@ -80,7 +80,7 @@ public class PipelineExecutionService implements ClusterStateListener { @Override protected void doRun() throws Exception { - for (DocumentRequest actionRequest : actionRequests) { + for (DocumentWriteRequest actionRequest : actionRequests) { if ((actionRequest instanceof IndexRequest)) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index 230373f7415..f0d4f35ff56 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.action.bulk; import org.apache.lucene.util.Constants; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -113,7 +113,7 @@ public class 
BulkRequestTests extends ESTestCase { public void testBulkAddIterable() { BulkRequest bulkRequest = Requests.bulkRequest(); - List> requests = new ArrayList<>(); + List> requests = new ArrayList<>(); requests.add(new IndexRequest("test", "test", "id").source("field", "value")); requests.add(new UpdateRequest("test", "test", "id").doc("field", "value")); requests.add(new DeleteRequest("test", "test", "id")); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index 590a503a654..0e2fec98abc 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -47,7 +47,7 @@ import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.function.Function; -import static org.elasticsearch.action.DocumentRequest.OpType; +import static org.elasticsearch.action.DocumentWriteRequest.OpType; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 72bdc8a58f9..7d79a91eb17 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -20,12 +20,8 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest; -import org.elasticsearch.action.DocumentRequest.OpType; -import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.DocumentWriteRequest.OpType; import 
org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.unit.TimeValue; diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index da25ec4261f..94fa533a674 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.unit.TimeValue; @@ -49,13 +49,13 @@ public class IndexRequestTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest(""); indexRequest.opType(create); - assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.CREATE)); + assertThat(indexRequest.opType() , equalTo(DocumentWriteRequest.OpType.CREATE)); indexRequest.opType(createUpper); - assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.CREATE)); + assertThat(indexRequest.opType() , equalTo(DocumentWriteRequest.OpType.CREATE)); indexRequest.opType(index); - assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.INDEX)); + assertThat(indexRequest.opType() , equalTo(DocumentWriteRequest.OpType.INDEX)); indexRequest.opType(indexUpper); - assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.INDEX)); + assertThat(indexRequest.opType() , equalTo(DocumentWriteRequest.OpType.INDEX)); } public void testReadBogusString() { diff --git 
a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java index 8dac5853cac..87adf3cf5b9 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; */ import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -116,7 +116,7 @@ public class BulkRequestModifierTests extends ESTestCase { }); List originalResponses = new ArrayList<>(); - for (DocumentRequest actionRequest : bulkRequest.requests()) { + for (DocumentWriteRequest actionRequest : bulkRequest.requests()) { IndexRequest indexRequest = (IndexRequest) actionRequest; IndexResponse indexResponse = new IndexResponse(new ShardId("index", "_na_", 0), indexRequest.type(), indexRequest.id(), 1, true); originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType(), indexResponse)); diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java index 2b9f9c55320..2ead142521d 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.bulk.BulkAction; import 
org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -174,7 +174,7 @@ public class IngestActionFilterTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); for (int i = 0; i < numRequest; i++) { if (rarely()) { - DocumentRequest request; + DocumentWriteRequest request; if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); } else { @@ -196,7 +196,7 @@ public class IngestActionFilterTests extends ESTestCase { verifyZeroInteractions(actionListener); int assertedRequests = 0; - for (DocumentRequest actionRequest : bulkRequest.requests()) { + for (DocumentWriteRequest actionRequest : bulkRequest.requests()) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; assertThat(indexRequest.sourceAsMap().size(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java index d198529f8d4..ac67a86a714 100644 --- a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java @@ -37,7 +37,7 @@ import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; -import static org.elasticsearch.action.DocumentRequest.OpType; +import static org.elasticsearch.action.DocumentWriteRequest.OpType; import static org.elasticsearch.client.Requests.clearIndicesCacheRequest; import static org.elasticsearch.client.Requests.getRequest; import static org.elasticsearch.client.Requests.indexRequest; diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index 8b22e4f0bc8..a1a6d612850 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ 
b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -317,7 +317,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); int numIndexRequests = 0; for (int i = 0; i < numRequest; i++) { - DocumentRequest request; + DocumentWriteRequest request; if (randomBoolean()) { if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); diff --git a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java index 5980f781e2e..eaa107c14ed 100644 --- a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -20,8 +20,7 @@ package org.elasticsearch.routing; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -261,7 +260,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.INDEX)); + assertThat(bulkItemResponse.getOpType(), equalTo(DocumentWriteRequest.OpType.INDEX)); 
assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -282,7 +281,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.UPDATE)); + assertThat(bulkItemResponse.getOpType(), equalTo(DocumentWriteRequest.OpType.UPDATE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -303,7 +302,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.DELETE)); + assertThat(bulkItemResponse.getOpType(), equalTo(DocumentWriteRequest.OpType.DELETE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 417defee5fa..a0326a2cc2c 100644 --- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -21,10 +21,9 @@ package 
org.elasticsearch.versioning; import org.apache.lucene.util.TestUtil; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.lucene.uid.Versions; @@ -690,7 +689,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { client() .prepareIndex("test", "type", "id") .setSource("foo", "bar") - .setOpType(DocumentRequest.OpType.INDEX) + .setOpType(DocumentWriteRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) .execute() @@ -759,7 +758,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { client() .prepareIndex("test", "type", "id") .setSource("foo", "bar") - .setOpType(DocumentRequest.OpType.INDEX) + .setOpType(DocumentWriteRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) .execute() diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java index 1f135500dfd..8c95b7b2f86 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import 
org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -154,9 +154,9 @@ public abstract class AbstractAsyncBulkIndexByScrollAction> { + interface RequestWrapper> { void setIndex(String index); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 4ea7f039970..797f2bcb4dc 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.DocWriteResponse.Result; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentWriteRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -49,7 +49,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Client; @@ -261,27 +260,27 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { ShardId shardId = new ShardId(new Index("name", "uid"), 0); if (rarely()) { versionConflicts++; - responses[i] = new BulkItemResponse(i, randomFrom(DocumentRequest.OpType.values()), + responses[i] = new BulkItemResponse(i, 
randomFrom(DocumentWriteRequest.OpType.values()), new Failure(shardId.getIndexName(), "type", "id" + i, new VersionConflictEngineException(shardId, "type", "id", "test"))); continue; } boolean createdResponse; - DocumentRequest.OpType opType; + DocumentWriteRequest.OpType opType; switch (randomIntBetween(0, 2)) { case 0: createdResponse = true; - opType = DocumentRequest.OpType.CREATE; + opType = DocumentWriteRequest.OpType.CREATE; created++; break; case 1: createdResponse = false; - opType = randomFrom(DocumentRequest.OpType.INDEX, DocumentRequest.OpType.UPDATE); + opType = randomFrom(DocumentWriteRequest.OpType.INDEX, DocumentWriteRequest.OpType.UPDATE); updated++; break; case 2: createdResponse = false; - opType = DocumentRequest.OpType.DELETE; + opType = DocumentWriteRequest.OpType.DELETE; deleted++; break; default: @@ -363,7 +362,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { Failure failure = new Failure("index", "type", "id", new RuntimeException("test")); DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction(); BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] - {new BulkItemResponse(0, DocumentRequest.OpType.CREATE, failure)}, randomLong()); + {new BulkItemResponse(0, DocumentWriteRequest.OpType.CREATE, failure)}, randomLong()); action.onBulkResponse(timeValueNanos(System.nanoTime()), bulkResponse); BulkIndexByScrollResponse response = listener.get(); assertThat(response.getBulkFailures(), contains(failure)); @@ -769,7 +768,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } BulkItemResponse[] responses = new BulkItemResponse[bulk.requests().size()]; for (int i = 0; i < bulk.requests().size(); i++) { - DocumentRequest item = bulk.requests().get(i); + DocumentWriteRequest item = bulk.requests().get(i); DocWriteResponse response; ShardId shardId = new ShardId(new Index(item.index(), "uuid"), 0); if (item instanceof IndexRequest) { diff --git 
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index c909ea42ecb..3bca1a2014f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -28,7 +28,7 @@ import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; -import static org.elasticsearch.action.DocumentRequest.OpType.CREATE; +import static org.elasticsearch.action.DocumentWriteRequest.OpType.CREATE; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java index 041c796b173..befd9c7cdc2 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.get.GetResponse; -import static org.elasticsearch.action.DocumentRequest.OpType.CREATE; +import static org.elasticsearch.action.DocumentWriteRequest.OpType.CREATE; import static org.elasticsearch.index.VersionType.EXTERNAL; import static org.elasticsearch.index.VersionType.INTERNAL; From 42bc2d15bedb0f3b457bbfe06247311b9667b7c9 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Thu, 6 Oct 2016 14:25:53 -0400 Subject: [PATCH 08/53] fix bug in bulk replication for noop update operation --- .../action/DocumentWriteRequest.java | 1 + .../action/bulk/TransportShardBulkAction.java | 15 ++++++++++----- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git 
a/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java index 66ea6401bcc..490aa7fd326 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java @@ -178,6 +178,7 @@ public abstract class DocumentWriteRequest> /** write a document write (index/delete/update) request*/ public static void writeDocumentRequest(StreamOutput out, DocumentWriteRequest request) throws IOException { + assert request != null : "request must not be null"; if (request instanceof IndexRequest) { out.writeByte((byte) 0); } else if (request instanceof DeleteRequest) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 9a58817c188..b25483268fa 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -168,7 +168,7 @@ public class TransportShardBulkAction extends TransportWriteAction itemRequest = request.items()[requestIndex].request(); + DocumentWriteRequest itemRequest = request.items()[requestIndex].request(); preVersions[requestIndex] = itemRequest.version(); preVersionTypes[requestIndex] = itemRequest.versionType(); DocumentWriteRequest.OpType opType = itemRequest.opType(); @@ -196,9 +196,14 @@ public class TransportShardBulkAction extends TransportWriteAction) writeResult.getReplicaRequest()); + DocumentWriteRequest replicaRequest = (DocumentWriteRequest) writeResult.getReplicaRequest(); + if (replicaRequest != null) { + request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), replicaRequest); + } else { + request.items()[requestIndex].setIgnoreOnReplica(); + } // add the response setResponse(request.items()[requestIndex], new 
BulkItemResponse(request.items()[requestIndex].id(), opType, writeResult.getResponse())); } catch (Exception e) { @@ -206,14 +211,14 @@ public class TransportShardBulkAction extends TransportWriteAction documentWriteRequest = request.items()[j].request(); + DocumentWriteRequest documentWriteRequest = request.items()[j].request(); documentWriteRequest.version(preVersions[j]); documentWriteRequest.versionType(preVersionTypes[j]); } throw (ElasticsearchException) e; } BulkItemRequest item = request.items()[requestIndex]; - DocumentWriteRequest documentWriteRequest = item.request(); + DocumentWriteRequest documentWriteRequest = item.request(); if (isConflictException(e)) { logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", request.shardId(), documentWriteRequest.opType().getLowercase(), request), e); From 2a651fc296a6c2b16cc1066e039d5e7f6f5121f9 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Thu, 6 Oct 2016 14:53:04 -0400 Subject: [PATCH 09/53] remove duplicate logic for request resolution and routing verification --- .../action/update/TransportUpdateAction.java | 8 -------- 1 file changed, 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 1f3a97a25a2..ee33594ebb6 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -100,14 +100,6 @@ public class TransportUpdateAction extends TransportWriteAction listener) { // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API From 5bbdcd6416078233664bb04f1f03a3d0e136876a Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Fri, 7 Oct 2016 17:48:44 -0400 Subject: [PATCH 10/53] Revert "remove duplicate logic for request resolution and routing verification" This reverts 
commit 2a651fc296a6c2b16cc1066e039d5e7f6f5121f9. --- .../action/update/TransportUpdateAction.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index ee33594ebb6..1f3a97a25a2 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -100,6 +100,14 @@ public class TransportUpdateAction extends TransportWriteAction listener) { // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API From 68c82cd113e0a79916ea9fce6546139de7753731 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Fri, 7 Oct 2016 17:49:57 -0400 Subject: [PATCH 11/53] Revert "fix bug in bulk replication for noop update operation" This reverts commit 42bc2d15bedb0f3b457bbfe06247311b9667b7c9. 
--- .../action/DocumentWriteRequest.java | 1 - .../action/bulk/TransportShardBulkAction.java | 15 +++++---------- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java index 490aa7fd326..66ea6401bcc 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java @@ -178,7 +178,6 @@ public abstract class DocumentWriteRequest> /** write a document write (index/delete/update) request*/ public static void writeDocumentRequest(StreamOutput out, DocumentWriteRequest request) throws IOException { - assert request != null : "request must not be null"; if (request instanceof IndexRequest) { out.writeByte((byte) 0); } else if (request instanceof DeleteRequest) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index b25483268fa..9a58817c188 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -168,7 +168,7 @@ public class TransportShardBulkAction extends TransportWriteAction itemRequest = request.items()[requestIndex].request(); preVersions[requestIndex] = itemRequest.version(); preVersionTypes[requestIndex] = itemRequest.versionType(); DocumentWriteRequest.OpType opType = itemRequest.opType(); @@ -196,14 +196,9 @@ public class TransportShardBulkAction extends TransportWriteAction) writeResult.getReplicaRequest()); // add the response setResponse(request.items()[requestIndex], new BulkItemResponse(request.items()[requestIndex].id(), opType, writeResult.getResponse())); } catch (Exception e) { @@ -211,14 +206,14 @@ public class TransportShardBulkAction extends TransportWriteAction 
documentWriteRequest = request.items()[j].request(); documentWriteRequest.version(preVersions[j]); documentWriteRequest.versionType(preVersionTypes[j]); } throw (ElasticsearchException) e; } BulkItemRequest item = request.items()[requestIndex]; - DocumentWriteRequest documentWriteRequest = item.request(); + DocumentWriteRequest documentWriteRequest = item.request(); if (isConflictException(e)) { logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", request.shardId(), documentWriteRequest.opType().getLowercase(), request), e); From 396f80c963a44992d3e60531c15adee8c50aaed5 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Fri, 7 Oct 2016 17:50:07 -0400 Subject: [PATCH 12/53] Revert "rename DocumentRequest to DocumentWriteRequest" This reverts commit b5079ce0092e2dfd742fbe3aed8a8f95931a378d. --- .../noop/action/bulk/RestNoopBulkAction.java | 4 +- .../action/bulk/TransportNoopBulkAction.java | 4 +- ...WriteRequest.java => DocumentRequest.java} | 6 +-- .../action/bulk/BulkItemRequest.java | 12 ++--- .../action/bulk/BulkItemResponse.java | 2 +- .../action/bulk/BulkProcessor.java | 12 ++--- .../action/bulk/BulkRequest.java | 28 +++++------ .../action/bulk/TransportBulkAction.java | 45 +++++++++--------- .../action/bulk/TransportShardBulkAction.java | 46 ++++++++++++------- .../action/delete/DeleteRequest.java | 5 +- .../action/delete/TransportDeleteAction.java | 2 + .../action/index/IndexRequest.java | 5 +- .../action/index/IndexRequestBuilder.java | 4 +- .../action/ingest/IngestActionFilter.java | 8 ++-- .../TransportReplicationAction.java | 4 +- .../action/update/UpdateReplicaRequest.java | 17 ++++--- .../action/update/UpdateRequest.java | 6 ++- .../org/elasticsearch/index/mapper/Uid.java | 2 + .../ingest/PipelineExecutionService.java | 6 +-- .../action/bulk/BulkRequestTests.java | 4 +- .../action/bulk/BulkWithUpdatesIT.java | 2 +- .../elasticsearch/action/bulk/RetryTests.java | 6 ++- .../action/index/IndexRequestTests.java | 
10 ++-- .../ingest/BulkRequestModifierTests.java | 4 +- .../ingest/IngestActionFilterTests.java | 6 +-- .../document/DocumentActionsIT.java | 2 +- .../ingest/PipelineExecutionServiceTests.java | 4 +- .../routing/SimpleRoutingIT.java | 9 ++-- .../versioning/SimpleVersioningIT.java | 7 +-- .../AbstractAsyncBulkIndexByScrollAction.java | 6 +-- .../reindex/AsyncBulkByScrollActionTests.java | 17 +++---- .../index/reindex/ReindexFailureTests.java | 2 +- .../index/reindex/ReindexVersioningTests.java | 2 +- 33 files changed, 165 insertions(+), 134 deletions(-) rename core/src/main/java/org/elasticsearch/action/{DocumentWriteRequest.java => DocumentRequest.java} (95%) diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java index 3add7b21c23..466821824a5 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.plugin.noop.action.bulk; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; @@ -85,7 +85,7 @@ public class RestNoopBulkAction extends BaseRestHandler { } private static class BulkRestBuilderListener extends RestBuilderListener { - private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentWriteRequest.OpType.UPDATE, + private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentRequest.OpType.UPDATE, new UpdateResponse(new 
ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); private final RestRequest request; diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java index 238508cf5df..931e6724462 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.plugin.noop.action.bulk; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -35,7 +35,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class TransportNoopBulkAction extends HandledTransportAction { - private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentWriteRequest.OpType.UPDATE, + private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); @Inject diff --git a/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java similarity index 95% rename from core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java rename to core/src/main/java/org/elasticsearch/action/DocumentRequest.java index 66ea6401bcc..ef2aa815a6b 
100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentWriteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java @@ -34,7 +34,7 @@ import java.util.Locale; * Generic interface to group ActionRequest, which perform writes to a single document * Action requests implementing this can be part of {@link org.elasticsearch.action.bulk.BulkRequest} */ -public abstract class DocumentWriteRequest> extends ReplicatedWriteRequest { +public abstract class DocumentRequest> extends ReplicatedWriteRequest { /** * Get the type that this request operates on @@ -153,7 +153,7 @@ public abstract class DocumentWriteRequest> } /** read a document write (index/delete/update) request */ - public static DocumentWriteRequest readDocumentRequest(StreamInput in) throws IOException { + public static DocumentRequest readDocumentRequest(StreamInput in) throws IOException { byte type = in.readByte(); if (type == 0) { IndexRequest indexRequest = new IndexRequest(); @@ -177,7 +177,7 @@ public abstract class DocumentWriteRequest> } /** write a document write (index/delete/update) request*/ - public static void writeDocumentRequest(StreamOutput out, DocumentWriteRequest request) throws IOException { + public static void writeDocumentRequest(StreamOutput out, DocumentRequest request) throws IOException { if (request instanceof IndexRequest) { out.writeByte((byte) 0); } else if (request instanceof DeleteRequest) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 7ba3c81b5df..df9fd13b034 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -32,7 +32,7 @@ import java.io.IOException; public class BulkItemRequest implements Streamable { private int id; - private DocumentWriteRequest request; + private DocumentRequest request; private volatile BulkItemResponse primaryResponse; private volatile boolean ignoreOnReplica; @@ -40,7 +40,7 @@ public class BulkItemRequest implements Streamable { } - public BulkItemRequest(int id, DocumentWriteRequest request) { + public BulkItemRequest(int id, DocumentRequest request) { this.id = id; this.request = request; } @@ -49,7 +49,7 @@ public class BulkItemRequest implements Streamable { return id; } - public DocumentWriteRequest request() { + public DocumentRequest request() { return request; } @@ -86,7 +86,7 @@ public class BulkItemRequest implements Streamable { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - request = DocumentWriteRequest.readDocumentRequest(in); + request = DocumentRequest.readDocumentRequest(in); if (in.readBoolean()) { primaryResponse = BulkItemResponse.readBulkItem(in); } @@ -96,7 +96,7 @@ public class BulkItemRequest implements Streamable { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - DocumentWriteRequest.writeDocumentRequest(out, request); + DocumentRequest.writeDocumentRequest(out, request); out.writeOptionalStreamable(primaryResponse); out.writeBoolean(ignoreOnReplica); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 9129f9b01bf..9f0714784bc 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -23,7 +23,7 @@ import org.elasticsearch.ElasticsearchException; import 
org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentWriteRequest.OpType; +import org.elasticsearch.action.DocumentRequest.OpType; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateResponse; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index ea07136a8c9..f32bfaa775c 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Client; @@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable { * (for example, if no id is provided, one will be generated, or usage of the create flag). */ public BulkProcessor add(IndexRequest request) { - return add((DocumentWriteRequest) request); + return add((DocumentRequest) request); } /** * Adds an {@link DeleteRequest} to the list of actions to execute. */ public BulkProcessor add(DeleteRequest request) { - return add((DocumentWriteRequest) request); + return add((DocumentRequest) request); } /** * Adds either a delete or an index request. 
*/ - public BulkProcessor add(DocumentWriteRequest request) { + public BulkProcessor add(DocumentRequest request) { return add(request, null); } - public BulkProcessor add(DocumentWriteRequest request, @Nullable Object payload) { + public BulkProcessor add(DocumentRequest request, @Nullable Object payload) { internalAdd(request, payload); return this; } @@ -282,7 +282,7 @@ public class BulkProcessor implements Closeable { } } - private synchronized void internalAdd(DocumentWriteRequest request, @Nullable Object payload) { + private synchronized void internalAdd(DocumentRequest request, @Nullable Object payload) { ensureOpen(); bulkRequest.add(request, payload); executeIfNeeded(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 2ec89d55228..7729c737439 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -72,7 +72,7 @@ public class BulkRequest extends ActionRequest implements Composite * {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare * the one with the least casts. 
*/ - final List> requests = new ArrayList<>(); + final List> requests = new ArrayList<>(); List payloads = null; protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; @@ -87,14 +87,14 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(DocumentWriteRequest... requests) { - for (DocumentWriteRequest request : requests) { + public BulkRequest add(DocumentRequest... requests) { + for (DocumentRequest request : requests) { add(request, null); } return this; } - public BulkRequest add(DocumentWriteRequest request) { + public BulkRequest add(DocumentRequest request) { return add(request, null); } @@ -104,7 +104,7 @@ public class BulkRequest extends ActionRequest implements Composite * @param payload Optional payload * @return the current bulk request */ - public BulkRequest add(DocumentWriteRequest request, @Nullable Object payload) { + public BulkRequest add(DocumentRequest request, @Nullable Object payload) { if (request instanceof IndexRequest) { add((IndexRequest) request, payload); } else if (request instanceof DeleteRequest) { @@ -120,8 +120,8 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(Iterable> requests) { - for (DocumentWriteRequest request : requests) { + public BulkRequest add(Iterable> requests) { + for (DocumentRequest request : requests) { add(request); } return this; @@ -207,7 +207,7 @@ public class BulkRequest extends ActionRequest implements Composite /** * The list of requests in this bulk request. */ - public List> requests() { + public List> requests() { return this.requests; } @@ -508,7 +508,7 @@ public class BulkRequest extends ActionRequest implements Composite * @return Whether this bulk request contains index request with an ingest pipeline enabled. 
*/ public boolean hasIndexRequestsWithPipelines() { - for (DocumentWriteRequest actionRequest : requests) { + for (DocumentRequest actionRequest : requests) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { @@ -526,7 +526,7 @@ public class BulkRequest extends ActionRequest implements Composite if (requests.isEmpty()) { validationException = addValidationError("no requests added", validationException); } - for (DocumentWriteRequest request : requests) { + for (DocumentRequest request : requests) { // We first check if refresh has been set if (request.getRefreshPolicy() != RefreshPolicy.NONE) { validationException = addValidationError( @@ -550,7 +550,7 @@ public class BulkRequest extends ActionRequest implements Composite waitForActiveShards = ActiveShardCount.readFrom(in); int size = in.readVInt(); for (int i = 0; i < size; i++) { - requests.add(DocumentWriteRequest.readDocumentRequest(in)); + requests.add(DocumentRequest.readDocumentRequest(in)); } refreshPolicy = RefreshPolicy.readFrom(in); timeout = new TimeValue(in); @@ -561,8 +561,8 @@ public class BulkRequest extends ActionRequest implements Composite super.writeTo(out); waitForActiveShards.writeTo(out); out.writeVInt(requests.size()); - for (DocumentWriteRequest request : requests) { - DocumentWriteRequest.writeDocumentRequest(out, request); + for (DocumentRequest request : requests) { + DocumentRequest.writeDocumentRequest(out, request); } refreshPolicy.writeTo(out); timeout.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index f7ea0033b5b..37c1b7c2290 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -22,11 +22,12 @@ package org.elasticsearch.action.bulk; 
import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; @@ -115,7 +116,7 @@ public class TransportBulkAction extends HandledTransportAction autoCreateIndices = bulkRequest.requests.stream() - .map(DocumentWriteRequest::index) + .map(DocumentRequest::index) .collect(Collectors.toSet()); final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size()); ClusterState state = clusterService.state(); @@ -142,7 +143,7 @@ public class TransportBulkAction extends HandledTransportAction request = bulkRequest.requests.get(i); + DocumentRequest request = bulkRequest.requests.get(i); if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) { bulkRequest.requests.set(i, null); } @@ -177,7 +178,7 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, DocumentWriteRequest request, String index, Exception e) { + private boolean setResponseFailureIfIndexMatches(AtomicArray responses, int idx, DocumentRequest request, String index, Exception e) { if (index.equals(request.index())) { responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e))); return true; @@ -208,20 +209,20 @@ public class TransportBulkAction extends 
HandledTransportAction Operations mapping Map> requestsByShard = new HashMap<>(); for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocumentWriteRequest request = bulkRequest.requests.get(i); + DocumentRequest request = bulkRequest.requests.get(i); if (request == null) { continue; } @@ -295,9 +296,9 @@ public class TransportBulkAction extends HandledTransportAction documentWriteRequest = request.request(); - responses.set(request.id(), new BulkItemResponse(request.id(), documentWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, documentWriteRequest.type(), documentWriteRequest.id(), e))); + DocumentRequest documentRequest = request.request(); + responses.set(request.id(), new BulkItemResponse(request.id(), documentRequest.opType(), + new BulkItemResponse.Failure(indexName, documentRequest.type(), documentRequest.id(), e))); } if (counter.decrementAndGet() == 0) { finishHim(); @@ -311,9 +312,9 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, - final ConcreteIndices concreteIndices, - final MetaData metaData) { + private boolean addFailureIfIndexIsUnavailable(DocumentRequest request, BulkRequest bulkRequest, AtomicArray responses, int idx, + final ConcreteIndices concreteIndices, + final MetaData metaData) { Index concreteIndex = concreteIndices.getConcreteIndex(request.index()); Exception unavailableException = null; if (concreteIndex == null) { @@ -357,7 +358,7 @@ public class TransportBulkAction extends HandledTransportAction itemRequest = request.items()[requestIndex].request(); + DocumentRequest itemRequest = request.items()[requestIndex].request(); preVersions[requestIndex] = itemRequest.version(); preVersionTypes[requestIndex] = itemRequest.versionType(); - DocumentWriteRequest.OpType opType = itemRequest.opType(); + DocumentRequest.OpType opType = itemRequest.opType(); try { final WriteResult writeResult; switch (itemRequest.opType()) { @@ -198,7 +210,7 @@ public class TransportShardBulkAction 
extends TransportWriteAction) writeResult.getReplicaRequest()); + (DocumentRequest) writeResult.getReplicaRequest()); // add the response setResponse(request.items()[requestIndex], new BulkItemResponse(request.items()[requestIndex].id(), opType, writeResult.getResponse())); } catch (Exception e) { @@ -206,20 +218,20 @@ public class TransportShardBulkAction extends TransportWriteAction documentWriteRequest = request.items()[j].request(); - documentWriteRequest.version(preVersions[j]); - documentWriteRequest.versionType(preVersionTypes[j]); + DocumentRequest documentRequest = request.items()[j].request(); + documentRequest.version(preVersions[j]); + documentRequest.versionType(preVersionTypes[j]); } throw (ElasticsearchException) e; } BulkItemRequest item = request.items()[requestIndex]; - DocumentWriteRequest documentWriteRequest = item.request(); + DocumentRequest documentRequest = item.request(); if (isConflictException(e)) { logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), documentWriteRequest.opType().getLowercase(), request), e); + request.shardId(), documentRequest.opType().getLowercase(), request), e); } else { logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), documentWriteRequest.opType().getLowercase(), request), e); + request.shardId(), documentRequest.opType().getLowercase(), request), e); } // if its a conflict failure, and we already executed the request on a primary (and we execute it // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) @@ -227,8 +239,8 @@ public class TransportShardBulkAction extends TransportWriteAction { +public class DeleteRequest extends DocumentRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java 
b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index d3cb4d24831..926700e327e 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -21,6 +21,8 @@ package org.elasticsearch.action.delete; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 264b7e0a844..48eaab2b48c 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -21,9 +21,10 @@ package org.elasticsearch.action.index; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.TimestampParsingException; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -66,7 +67,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @see org.elasticsearch.client.Requests#indexRequest(String) * @see org.elasticsearch.client.Client#index(IndexRequest) */ -public class IndexRequest extends 
DocumentWriteRequest { +public class IndexRequest extends DocumentRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 3291f219a9c..a9d8bcaa56b 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.index; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -201,7 +201,7 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder> { + static final class BulkRequestModifier implements Iterator> { final BulkRequest bulkRequest; final Set failedSlots; @@ -151,7 +151,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio } @Override - public DocumentWriteRequest next() { + public DocumentRequest next() { return bulkRequest.requests().get(++currentSlot); } @@ -172,7 +172,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio int slot = 0; originalSlots = new int[bulkRequest.requests().size() - failedSlots.size()]; for (int i = 0; i < bulkRequest.requests().size(); i++) { - DocumentWriteRequest request = bulkRequest.requests().get(i); + DocumentRequest request = bulkRequest.requests().get(i); if (failedSlots.contains(i) == false) { modifiedBulkRequest.add(request); originalSlots[slot++] = i; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index cab7f1606b8..95e196672d4 
100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -23,7 +23,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActionFilters; @@ -167,7 +167,7 @@ public abstract class TransportReplicationAction< /** helper to verify and resolve request routing */ public static void resolveAndValidateRouting(final MetaData metaData, final String concreteIndex, - DocumentWriteRequest request) { + DocumentRequest request) { request.routing(metaData.resolveIndexRouting(request.parent(), request.routing(), request.index())); // check if routing is required, if so, throw error if routing wasn't specified if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) { diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java index 1eaf3c698fd..5f258a675c2 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java @@ -19,7 +19,10 @@ package org.elasticsearch.action.update; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import 
org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.VersionType; @@ -27,13 +30,13 @@ import org.elasticsearch.index.VersionType; import java.io.IOException; /** Replica request for update operation holds translated (index/delete) requests */ -public class UpdateReplicaRequest extends DocumentWriteRequest { - private DocumentWriteRequest request; +public class UpdateReplicaRequest extends DocumentRequest { + private DocumentRequest request; public UpdateReplicaRequest() { } - public UpdateReplicaRequest(DocumentWriteRequest request) { + public UpdateReplicaRequest(DocumentRequest request) { assert !(request instanceof UpdateReplicaRequest) : "underlying request must not be a update replica request"; this.request = request; this.index = request.index(); @@ -42,20 +45,20 @@ public class UpdateReplicaRequest extends DocumentWriteRequest getRequest() { + public DocumentRequest getRequest() { return request; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - request = DocumentWriteRequest.readDocumentRequest(in); + request = DocumentRequest.readDocumentRequest(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - DocumentWriteRequest.writeDocumentRequest(out, request); + DocumentRequest.writeDocumentRequest(out, request); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index d00f7c046f6..80d3676e051 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.action.ActionRequestValidationException; -import 
org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.common.Nullable; @@ -29,6 +29,8 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -51,7 +53,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** */ -public class UpdateRequest extends DocumentWriteRequest { +public class UpdateRequest extends DocumentRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Uid.java b/core/src/main/java/org/elasticsearch/index/mapper/Uid.java index 344c8dc0cc0..2a8938b4ab7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Uid.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Uid.java @@ -21,10 +21,12 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.common.lucene.BytesRefs; import java.util.Collection; import java.util.Collections; +import java.util.List; /** * diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 2a5217a9f25..57eb7afcb5a 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ 
b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -19,7 +19,7 @@ package org.elasticsearch.ingest; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; @@ -68,7 +68,7 @@ public class PipelineExecutionService implements ClusterStateListener { }); } - public void executeBulkRequest(Iterable> actionRequests, + public void executeBulkRequest(Iterable> actionRequests, BiConsumer itemFailureHandler, Consumer completionHandler) { threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() { @@ -80,7 +80,7 @@ public class PipelineExecutionService implements ClusterStateListener { @Override protected void doRun() throws Exception { - for (DocumentWriteRequest actionRequest : actionRequests) { + for (DocumentRequest actionRequest : actionRequests) { if ((actionRequest instanceof IndexRequest)) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index f0d4f35ff56..230373f7415 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.action.bulk; import org.apache.lucene.util.Constants; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -113,7 +113,7 @@ public class 
BulkRequestTests extends ESTestCase { public void testBulkAddIterable() { BulkRequest bulkRequest = Requests.bulkRequest(); - List> requests = new ArrayList<>(); + List> requests = new ArrayList<>(); requests.add(new IndexRequest("test", "test", "id").source("field", "value")); requests.add(new UpdateRequest("test", "test", "id").doc("field", "value")); requests.add(new DeleteRequest("test", "test", "id")); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index 0e2fec98abc..590a503a654 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -47,7 +47,7 @@ import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.function.Function; -import static org.elasticsearch.action.DocumentWriteRequest.OpType; +import static org.elasticsearch.action.DocumentRequest.OpType; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 7d79a91eb17..72bdc8a58f9 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -20,8 +20,12 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentWriteRequest.OpType; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocumentRequest.OpType; +import org.elasticsearch.action.delete.DeleteRequest; import 
org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.unit.TimeValue; diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index 94fa533a674..da25ec4261f 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.unit.TimeValue; @@ -49,13 +49,13 @@ public class IndexRequestTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest(""); indexRequest.opType(create); - assertThat(indexRequest.opType() , equalTo(DocumentWriteRequest.OpType.CREATE)); + assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.CREATE)); indexRequest.opType(createUpper); - assertThat(indexRequest.opType() , equalTo(DocumentWriteRequest.OpType.CREATE)); + assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.CREATE)); indexRequest.opType(index); - assertThat(indexRequest.opType() , equalTo(DocumentWriteRequest.OpType.INDEX)); + assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.INDEX)); indexRequest.opType(indexUpper); - assertThat(indexRequest.opType() , equalTo(DocumentWriteRequest.OpType.INDEX)); + assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.INDEX)); } public void testReadBogusString() { diff --git 
a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java index 87adf3cf5b9..8dac5853cac 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; */ import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -116,7 +116,7 @@ public class BulkRequestModifierTests extends ESTestCase { }); List originalResponses = new ArrayList<>(); - for (DocumentWriteRequest actionRequest : bulkRequest.requests()) { + for (DocumentRequest actionRequest : bulkRequest.requests()) { IndexRequest indexRequest = (IndexRequest) actionRequest; IndexResponse indexResponse = new IndexResponse(new ShardId("index", "_na_", 0), indexRequest.type(), indexRequest.id(), 1, true); originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType(), indexResponse)); diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java index 2ead142521d..2b9f9c55320 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkAction; import 
org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -174,7 +174,7 @@ public class IngestActionFilterTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); for (int i = 0; i < numRequest; i++) { if (rarely()) { - DocumentWriteRequest request; + DocumentRequest request; if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); } else { @@ -196,7 +196,7 @@ public class IngestActionFilterTests extends ESTestCase { verifyZeroInteractions(actionListener); int assertedRequests = 0; - for (DocumentWriteRequest actionRequest : bulkRequest.requests()) { + for (DocumentRequest actionRequest : bulkRequest.requests()) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; assertThat(indexRequest.sourceAsMap().size(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java index ac67a86a714..d198529f8d4 100644 --- a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java @@ -37,7 +37,7 @@ import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; -import static org.elasticsearch.action.DocumentWriteRequest.OpType; +import static org.elasticsearch.action.DocumentRequest.OpType; import static org.elasticsearch.client.Requests.clearIndicesCacheRequest; import static org.elasticsearch.client.Requests.getRequest; import static org.elasticsearch.client.Requests.indexRequest; diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index a1a6d612850..8b22e4f0bc8 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ 
b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -317,7 +317,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); int numIndexRequests = 0; for (int i = 0; i < numRequest; i++) { - DocumentWriteRequest request; + DocumentRequest request; if (randomBoolean()) { if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); diff --git a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java index eaa107c14ed..5980f781e2e 100644 --- a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -20,7 +20,8 @@ package org.elasticsearch.routing; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -260,7 +261,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo(DocumentWriteRequest.OpType.INDEX)); + assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.INDEX)); 
assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -281,7 +282,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo(DocumentWriteRequest.OpType.UPDATE)); + assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.UPDATE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -302,7 +303,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo(DocumentWriteRequest.OpType.DELETE)); + assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.DELETE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index a0326a2cc2c..417defee5fa 100644 --- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -21,9 +21,10 @@ package 
org.elasticsearch.versioning; import org.apache.lucene.util.TestUtil; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.lucene.uid.Versions; @@ -689,7 +690,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { client() .prepareIndex("test", "type", "id") .setSource("foo", "bar") - .setOpType(DocumentWriteRequest.OpType.INDEX) + .setOpType(DocumentRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) .execute() @@ -758,7 +759,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { client() .prepareIndex("test", "type", "id") .setSource("foo", "bar") - .setOpType(DocumentWriteRequest.OpType.INDEX) + .setOpType(DocumentRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) .execute() diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java index 8c95b7b2f86..1f135500dfd 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.bulk.BulkRequest; import 
org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -154,9 +154,9 @@ public abstract class AbstractAsyncBulkIndexByScrollAction> { + interface RequestWrapper> { void setIndex(String index); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 797f2bcb4dc..4ea7f039970 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.DocWriteResponse.Result; -import org.elasticsearch.action.DocumentWriteRequest; +import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -49,6 +49,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Client; @@ -260,27 +261,27 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { ShardId shardId = new ShardId(new Index("name", "uid"), 0); if (rarely()) { versionConflicts++; - responses[i] = new BulkItemResponse(i, randomFrom(DocumentWriteRequest.OpType.values()), + responses[i] = new BulkItemResponse(i, 
randomFrom(DocumentRequest.OpType.values()), new Failure(shardId.getIndexName(), "type", "id" + i, new VersionConflictEngineException(shardId, "type", "id", "test"))); continue; } boolean createdResponse; - DocumentWriteRequest.OpType opType; + DocumentRequest.OpType opType; switch (randomIntBetween(0, 2)) { case 0: createdResponse = true; - opType = DocumentWriteRequest.OpType.CREATE; + opType = DocumentRequest.OpType.CREATE; created++; break; case 1: createdResponse = false; - opType = randomFrom(DocumentWriteRequest.OpType.INDEX, DocumentWriteRequest.OpType.UPDATE); + opType = randomFrom(DocumentRequest.OpType.INDEX, DocumentRequest.OpType.UPDATE); updated++; break; case 2: createdResponse = false; - opType = DocumentWriteRequest.OpType.DELETE; + opType = DocumentRequest.OpType.DELETE; deleted++; break; default: @@ -362,7 +363,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { Failure failure = new Failure("index", "type", "id", new RuntimeException("test")); DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction(); BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] - {new BulkItemResponse(0, DocumentWriteRequest.OpType.CREATE, failure)}, randomLong()); + {new BulkItemResponse(0, DocumentRequest.OpType.CREATE, failure)}, randomLong()); action.onBulkResponse(timeValueNanos(System.nanoTime()), bulkResponse); BulkIndexByScrollResponse response = listener.get(); assertThat(response.getBulkFailures(), contains(failure)); @@ -768,7 +769,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } BulkItemResponse[] responses = new BulkItemResponse[bulk.requests().size()]; for (int i = 0; i < bulk.requests().size(); i++) { - DocumentWriteRequest item = bulk.requests().get(i); + DocumentRequest item = bulk.requests().get(i); DocWriteResponse response; ShardId shardId = new ShardId(new Index(item.index(), "uuid"), 0); if (item instanceof IndexRequest) { diff --git 
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index 3bca1a2014f..c909ea42ecb 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -28,7 +28,7 @@ import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; -import static org.elasticsearch.action.DocumentWriteRequest.OpType.CREATE; +import static org.elasticsearch.action.DocumentRequest.OpType.CREATE; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java index befd9c7cdc2..041c796b173 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.get.GetResponse; -import static org.elasticsearch.action.DocumentWriteRequest.OpType.CREATE; +import static org.elasticsearch.action.DocumentRequest.OpType.CREATE; import static org.elasticsearch.index.VersionType.EXTERNAL; import static org.elasticsearch.index.VersionType.INTERNAL; From 97a67565219ea2834bf30dfb0fa23e64d7a1c586 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Fri, 7 Oct 2016 17:50:15 -0400 Subject: [PATCH 13/53] Revert "Make update a replication action" This reverts commit eee0d18f94108b5ece7b18a450bbcc5c729d9311. 
--- .../elasticsearch/action/DocumentRequest.java | 61 ++-- .../action/bulk/BulkItemRequest.java | 3 + .../action/bulk/BulkRequest.java | 4 +- .../action/bulk/TransportBulkAction.java | 10 +- .../action/bulk/TransportShardBulkAction.java | 173 ++++----- .../action/delete/DeleteRequest.java | 2 +- .../action/delete/TransportDeleteAction.java | 26 +- .../action/index/IndexRequest.java | 2 +- .../action/index/TransportIndexAction.java | 12 +- .../replication/ReplicationOperation.java | 36 +- .../TransportReplicationAction.java | 16 +- .../replication/TransportWriteAction.java | 36 +- .../InstanceShardOperationRequest.java | 138 ++++++++ .../InstanceShardOperationRequestBuilder.java | 60 ++++ ...ransportInstanceSingleOperationAction.java | 270 +++++++++++++++ .../action/update/TransportUpdateAction.java | 293 +++++++++------- .../action/update/UpdateHelper.java | 14 +- .../action/update/UpdateReplicaRequest.java | 113 ------ .../action/update/UpdateRequest.java | 43 ++- .../action/update/UpdateRequestBuilder.java | 4 +- .../elasticsearch/indices/IndicesModule.java | 2 + .../action/IndicesRequestIT.java | 9 +- .../TransportWriteActionTests.java | 11 +- ...ortInstanceSingleOperationActionTests.java | 327 ++++++++++++++++++ .../action/update/UpdateRequestTests.java | 3 +- .../ESIndexLevelReplicationTestCase.java | 4 +- docs/reference/docs/update.asciidoc | 4 +- 27 files changed, 1218 insertions(+), 458 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java create mode 100644 core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java create mode 100644 core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java delete mode 100644 core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java create mode 100644 
core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java diff --git a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java index ef2aa815a6b..f4c88e159c7 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java @@ -20,11 +20,10 @@ package org.elasticsearch.action; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateReplicaRequest; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.index.VersionType; import java.io.IOException; @@ -34,72 +33,84 @@ import java.util.Locale; * Generic interface to group ActionRequest, which perform writes to a single document * Action requests implementing this can be part of {@link org.elasticsearch.action.bulk.BulkRequest} */ -public abstract class DocumentRequest> extends ReplicatedWriteRequest { +public interface DocumentRequest extends IndicesRequest { + + /** + * Get the index that this request operates on + * @return the index + */ + String index(); /** * Get the type that this request operates on * @return the type */ - public abstract String type(); + String type(); /** * Get the id of the document for this request * @return the id */ - public abstract String id(); + String id(); + + /** + * Get the options for this request + * @return the indices options + */ + IndicesOptions indicesOptions(); /** * Set the routing for this request * @return the Request */ - public abstract T routing(String routing); + T routing(String routing); /** 
* Get the routing for this request * @return the Routing */ - public abstract String routing(); + String routing(); /** * Get the parent for this request * @return the Parent */ - public abstract String parent(); + String parent(); /** * Get the document version for this request * @return the document version */ - public abstract long version(); + long version(); /** * Sets the version, which will perform the operation only if a matching * version exists and no changes happened on the doc since then. */ - public abstract T version(long version); + T version(long version); /** * Get the document version type for this request * @return the document version type */ - public abstract VersionType versionType(); + VersionType versionType(); /** * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}. */ - public abstract T versionType(VersionType versionType); + T versionType(VersionType versionType); /** * Get the requested document operation type of the request * @return the operation type {@link OpType} */ - public abstract OpType opType(); + OpType opType(); /** * Requested operation type to perform on the document */ - public enum OpType { + enum OpType { /** * Index the source. If there an existing document with the id, it will * be replaced. 
@@ -153,42 +164,40 @@ public abstract class DocumentRequest> exten } /** read a document write (index/delete/update) request */ - public static DocumentRequest readDocumentRequest(StreamInput in) throws IOException { + static DocumentRequest readDocumentRequest(StreamInput in) throws IOException { byte type = in.readByte(); + final DocumentRequest documentRequest; if (type == 0) { IndexRequest indexRequest = new IndexRequest(); indexRequest.readFrom(in); - return indexRequest; + documentRequest = indexRequest; } else if (type == 1) { DeleteRequest deleteRequest = new DeleteRequest(); deleteRequest.readFrom(in); - return deleteRequest; + documentRequest = deleteRequest; } else if (type == 2) { UpdateRequest updateRequest = new UpdateRequest(); updateRequest.readFrom(in); - return updateRequest; - } else if (type == 3) { - UpdateReplicaRequest updateReplicaRequest = new UpdateReplicaRequest(); - updateReplicaRequest.readFrom(in); - return updateReplicaRequest; + documentRequest = updateRequest; } else { throw new IllegalStateException("invalid request type [" + type+ " ]"); } + return documentRequest; } /** write a document write (index/delete/update) request*/ - public static void writeDocumentRequest(StreamOutput out, DocumentRequest request) throws IOException { + static void writeDocumentRequest(StreamOutput out, DocumentRequest request) throws IOException { if (request instanceof IndexRequest) { out.writeByte((byte) 0); + ((IndexRequest) request).writeTo(out); } else if (request instanceof DeleteRequest) { out.writeByte((byte) 1); + ((DeleteRequest) request).writeTo(out); } else if (request instanceof UpdateRequest) { out.writeByte((byte) 2); - } else if (request instanceof UpdateReplicaRequest) { - out.writeByte((byte) 3); + ((UpdateRequest) request).writeTo(out); } else { throw new IllegalStateException("invalid request [" + request.getClass().getSimpleName() + " ]"); } - request.writeTo(out); } } diff --git 
a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index df9fd13b034..079d4efe9bf 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -20,6 +20,9 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 7729c737439..dc72407cf42 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -528,11 +528,11 @@ public class BulkRequest extends ActionRequest implements Composite } for (DocumentRequest request : requests) { // We first check if refresh has been set - if (request.getRefreshPolicy() != RefreshPolicy.NONE) { + if (((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { validationException = addValidationError( "RefreshPolicy is not supported on an item request. 
Set it on the BulkRequest instead.", validationException); } - ActionRequestValidationException ex = request.validate(); + ActionRequestValidationException ex = ((WriteRequest) request).validate(); if (ex != null) { if (validationException == null) { validationException = new ActionRequestValidationException(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 37c1b7c2290..f7861d1e093 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -27,12 +27,14 @@ import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.action.update.TransportUpdateAction; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -209,7 +211,7 @@ public class TransportBulkAction extends HandledTransportAction documentRequest = bulkRequest.requests.get(i); //the request can only be null because we set it to null in the previous step, so it gets ignored if (documentRequest == null) { continue; @@ -232,8 +234,10 @@ public class 
TransportBulkAction extends HandledTransportAction { +public class TransportShardBulkAction extends TransportWriteAction { public static final String ACTION_NAME = BulkAction.NAME + "[s]"; + private final UpdateHelper updateHelper; private final boolean allowIdGeneration; private final MappingUpdatedAction mappingUpdatedAction; - private final UpdateHelper updateHelper; - private final AutoCreateIndex autoCreateIndex; - private final TransportCreateIndexAction createIndexAction; @Inject public TransportShardBulkAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, - MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, ScriptService scriptService, - AutoCreateIndex autoCreateIndex, TransportCreateIndexAction createIndexAction) { + MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK); + indexNameExpressionResolver, BulkShardRequest::new, ThreadPool.Names.BULK); + this.updateHelper = updateHelper; this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); this.mappingUpdatedAction = mappingUpdatedAction; - this.updateHelper = new UpdateHelper(scriptService, logger); - this.autoCreateIndex = autoCreateIndex; - this.createIndexAction = createIndexAction; } @Override @@ -122,39 +105,7 @@ public class TransportShardBulkAction extends TransportWriteAction listener) { - ClusterState state = clusterService.state(); - if (autoCreateIndex.shouldAutoCreate(request.index(), state)) { - CreateIndexRequest createIndexRequest = new 
CreateIndexRequest(); - createIndexRequest.index(request.index()); - createIndexRequest.cause("auto(bulk api)"); - createIndexRequest.masterNodeTimeout(request.timeout()); - createIndexAction.execute(task, createIndexRequest, new ActionListener() { - @Override - public void onResponse(CreateIndexResponse result) { - innerExecute(task, request, listener); - } - - @Override - public void onFailure(Exception e) { - if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) { - // we have the index, do it - innerExecute(task, request, listener); - } else { - listener.onFailure(e); - } - } - }); - } else { - innerExecute(task, request, listener); - } - } - - private void innerExecute(Task task, final BulkShardRequest request, final ActionListener listener) { - super.doExecute(task, request, listener); - } - @Override - protected WriteResult onPrimaryShard(BulkShardRequest request, IndexShard indexShard) throws Exception { + protected WriteResult onPrimaryShard(BulkShardRequest request, IndexShard indexShard) throws Exception { ShardId shardId = request.shardId(); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); final IndexMetaData metaData = indexService.getIndexSettings().getIndexMetaData(); @@ -172,7 +123,7 @@ public class TransportShardBulkAction extends TransportWriteAction(request, response, location); + return new WriteResult<>(response, location); } /** Executes bulk item requests and handles request execution exceptions */ @@ -180,39 +131,22 @@ public class TransportShardBulkAction extends TransportWriteAction itemRequest = request.items()[requestIndex].request(); - preVersions[requestIndex] = itemRequest.version(); - preVersionTypes[requestIndex] = itemRequest.versionType(); - DocumentRequest.OpType opType = itemRequest.opType(); + preVersions[requestIndex] = request.items()[requestIndex].request().version(); + preVersionTypes[requestIndex] = request.items()[requestIndex].request().versionType(); + 
DocumentRequest.OpType opType = request.items()[requestIndex].request().opType(); try { - final WriteResult writeResult; - switch (itemRequest.opType()) { - case CREATE: - case INDEX: - writeResult = TransportIndexAction.executeIndexRequestOnPrimary(((IndexRequest) itemRequest), indexShard, - mappingUpdatedAction); - break; - case UPDATE: - writeResult = TransportUpdateAction.executeUpdateRequestOnPrimary(((UpdateRequest) itemRequest), indexShard, - metaData, updateHelper, mappingUpdatedAction, allowIdGeneration); - break; - case DELETE: - writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(((DeleteRequest) itemRequest), indexShard); - break; - default: - throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); - } + WriteResult writeResult = innerExecuteBulkItemRequest(metaData, indexShard, + request, requestIndex); if (writeResult.getLocation() != null) { location = locationToSync(location, writeResult.getLocation()); } else { assert writeResult.getResponse().getResult() == DocWriteResponse.Result.NOOP : "only noop operation can have null next operation"; } - // update the bulk item request with replica request (update request are changed to index or delete requests for replication) - request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), - (DocumentRequest) writeResult.getReplicaRequest()); + // update the bulk item request because update request execution can mutate the bulk item request + BulkItemRequest item = request.items()[requestIndex]; // add the response - setResponse(request.items()[requestIndex], new BulkItemResponse(request.items()[requestIndex].id(), opType, writeResult.getResponse())); + setResponse(item, new BulkItemResponse(item.id(), opType, writeResult.getResponse())); } catch (Exception e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it if (retryPrimaryException(e)) { @@ -248,6 +182,33 @@ public class 
TransportShardBulkAction extends TransportWriteAction innerExecuteBulkItemRequest(IndexMetaData metaData, IndexShard indexShard, + BulkShardRequest request, int requestIndex) throws Exception { + DocumentRequest itemRequest = request.items()[requestIndex].request(); + switch (itemRequest.opType()) { + case CREATE: + case INDEX: + return TransportIndexAction.executeIndexRequestOnPrimary(((IndexRequest) itemRequest), indexShard, mappingUpdatedAction); + case UPDATE: + int maxAttempts = ((UpdateRequest) itemRequest).retryOnConflict(); + for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) { + try { + return shardUpdateOperation(metaData, indexShard, request, requestIndex, ((UpdateRequest) itemRequest)); + } catch (Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + if (attemptCount == maxAttempts // bubble up exception when we run out of attempts + || (cause instanceof VersionConflictEngineException) == false) { // or when exception is not a version conflict + throw e; + } + } + } + throw new IllegalStateException("version conflict exception should bubble up on last attempt"); + case DELETE: + return TransportDeleteAction.executeDeleteRequestOnPrimary(((DeleteRequest) itemRequest), indexShard); + default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); + } + } + private void setResponse(BulkItemRequest request, BulkItemResponse response) { request.setPrimaryResponse(response); if (response.isFailed()) { @@ -258,6 +219,51 @@ public class TransportShardBulkAction extends TransportWriteAction shardUpdateOperation(IndexMetaData metaData, IndexShard indexShard, + BulkShardRequest request, + int requestIndex, UpdateRequest updateRequest) + throws Exception { + // Todo: capture read version conflicts, missing documents and malformed script errors in the write result due to get request + UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard); + switch 
(translate.getResponseResult()) { + case CREATED: + case UPDATED: + IndexRequest indexRequest = translate.action(); + MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); + indexRequest.process(mappingMd, allowIdGeneration, request.index()); + WriteResult writeResult = TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); + BytesReference indexSourceAsBytes = indexRequest.source(); + IndexResponse indexResponse = writeResult.getResponse(); + UpdateResponse writeUpdateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult()); + if (updateRequest.fields() != null && updateRequest.fields().length > 0) { + Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); + writeUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); + } + // Replace the update request to the translated index request to execute on the replica. 
+ request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest); + return new WriteResult<>(writeUpdateResponse, writeResult.getLocation()); + case DELETED: + DeleteRequest deleteRequest = translate.action(); + WriteResult deleteResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); + DeleteResponse response = deleteResult.getResponse(); + UpdateResponse deleteUpdateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); + deleteUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), translate.updatedSourceAsMap(), translate.updateSourceContentType(), null)); + // Replace the update request to the translated delete request to execute on the replica. + request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest); + return new WriteResult<>(deleteUpdateResponse, deleteResult.getLocation()); + case NOOP: + BulkItemRequest item = request.items()[requestIndex]; + indexShard.noopUpdate(updateRequest.type()); + item.setIgnoreOnReplica(); // no need to go to the replica + return new WriteResult<>(translate.action(), null); + default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); + } + } + @Override protected Location onReplicaShard(BulkShardRequest request, IndexShard indexShard) { Translog.Location location = null; @@ -266,8 +272,7 @@ public class TransportShardBulkAction extends TransportWriteAction documentRequest = item.request(); final Engine.Operation operation; try { switch (documentRequest.opType()) { diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index f2e5e13494d..e3babcfc380 100644 --- 
a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -43,7 +43,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @see org.elasticsearch.client.Client#delete(DeleteRequest) * @see org.elasticsearch.client.Requests#deleteRequest(String) */ -public class DeleteRequest extends DocumentRequest { +public class DeleteRequest extends ReplicatedWriteRequest implements DocumentRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 926700e327e..6f3d27ea369 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.delete; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -50,7 +49,7 @@ import org.elasticsearch.transport.TransportService; /** * Performs the delete operation. 
*/ -public class TransportDeleteAction extends TransportWriteAction { +public class TransportDeleteAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; @@ -62,7 +61,7 @@ public class TransportDeleteAction extends TransportWriteAction listener) { ClusterState state = clusterService.state(); if (autoCreateIndex.shouldAutoCreate(request.index(), state)) { - CreateIndexRequest createIndexRequest = new CreateIndexRequest(); - createIndexRequest.index(request.index()); - createIndexRequest.cause("auto(delete api)"); - createIndexRequest.masterNodeTimeout(request.timeout()); - createIndexAction.execute(task, createIndexRequest, new ActionListener() { + createIndexAction.execute(task, new CreateIndexRequest().index(request.index()).cause("auto(delete api)").masterNodeTimeout(request.timeout()), new ActionListener() { @Override public void onResponse(CreateIndexResponse result) { innerExecute(task, request, listener); @@ -105,6 +100,15 @@ public class TransportDeleteAction extends TransportWriteAction listener) { super.doExecute(task, request, listener); } @@ -115,7 +119,7 @@ public class TransportDeleteAction extends TransportWriteAction onPrimaryShard(DeleteRequest request, IndexShard indexShard) { + protected WriteResult onPrimaryShard(DeleteRequest request, IndexShard indexShard) { return executeDeleteRequestOnPrimary(request, indexShard); } @@ -124,7 +128,7 @@ public class TransportDeleteAction extends TransportWriteAction executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) { + public static WriteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) { Engine.Delete delete = indexShard.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType()); indexShard.delete(delete); // update the request with the version so it will go to the replicas @@ -133,7 +137,7 @@ public class TransportDeleteAction 
extends TransportWriteAction(request, response, delete.getTranslogLocation()); + return new WriteResult<>(response, delete.getTranslogLocation()); } public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, IndexShard indexShard) { diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 48eaab2b48c..cce0f6c8eef 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -67,7 +67,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @see org.elasticsearch.client.Requests#indexRequest(String) * @see org.elasticsearch.client.Client#index(IndexRequest) */ -public class IndexRequest extends DocumentRequest { +public class IndexRequest extends ReplicatedWriteRequest implements DocumentRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 37cc2d7e3bc..cc3fbb7906d 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -60,7 +60,7 @@ import org.elasticsearch.transport.TransportService; *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * */ -public class TransportIndexAction extends TransportWriteAction { +public class TransportIndexAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; @@ -76,7 +76,7 @@ public class TransportIndexAction extends TransportWriteAction onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception { + protected WriteResult onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception { return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); } @@ -174,7 +174,7 @@ public class TransportIndexAction extends TransportWriteAction executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, + public static WriteResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception { Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard); Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); @@ -198,7 +198,7 @@ public class TransportIndexAction extends TransportWriteAction(request, response, operation.getTranslogLocation()); + return new WriteResult<>(response, operation.getTranslogLocation()); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 8aa0ed66a77..d541ef6a35c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import 
org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -113,24 +112,22 @@ public class ReplicationOperation< pendingActions.incrementAndGet(); primaryResult = primary.perform(request); final ReplicaRequest replicaRequest = primaryResult.replicaRequest(); - if (replicaRequest != null) { - assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; - if (logger.isTraceEnabled()) { - logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); - } - - // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. - // we have to make sure that every operation indexed into the primary after recovery start will also be replicated - // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. - ClusterState clusterState = clusterStateSupplier.get(); - final List shards = getShards(primaryId, clusterState); - Set inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState); - - markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards); - - performOnReplicas(replicaRequest, shards); + assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; + if (logger.isTraceEnabled()) { + logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); } + // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. + // we have to make sure that every operation indexed into the primary after recovery start will also be replicated + // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. 
+ ClusterState clusterState = clusterStateSupplier.get(); + final List shards = getShards(primaryId, clusterState); + Set inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState); + + markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards); + + performOnReplicas(replicaRequest, shards); + successfulShards.incrementAndGet(); decPendingAndFinishIfNeeded(); } @@ -422,10 +419,7 @@ public class ReplicationOperation< public interface PrimaryResult> { - /** - * @return null if no operation needs to be sent to a replica - */ - @Nullable R replicaRequest(); + R replicaRequest(); void setShardInfo(ReplicationResponse.ShardInfo shardInfo); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 95e196672d4..9587b4e6b2c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -23,8 +23,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.DocumentRequest; -import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; @@ -165,16 +163,6 @@ public abstract class TransportReplicationAction< } } - /** helper to verify and resolve request routing */ - public static void resolveAndValidateRouting(final MetaData metaData, final String concreteIndex, - DocumentRequest request) { - request.routing(metaData.resolveIndexRouting(request.parent(), request.routing(), 
request.index())); - // check if routing is required, if so, throw error if routing wasn't specified - if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) { - throw new RoutingMissingException(concreteIndex, request.type(), request.id()); - } - } - /** * Primary operation on node with primary copy. * @@ -912,9 +900,7 @@ public abstract class TransportReplicationAction< @Override public PrimaryResult perform(Request request) throws Exception { PrimaryResult result = shardOperationOnPrimary(request); - if (result.replicaRequest() != null) { - result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); - } + result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); return result; } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index ee8ee4862f9..bf2b3235b11 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -49,40 +49,38 @@ import java.util.function.Supplier; */ public abstract class TransportWriteAction< Request extends ReplicatedWriteRequest, - ReplicaRequest extends ReplicatedWriteRequest, Response extends ReplicationResponse & WriteResponse - > extends TransportReplicationAction { + > extends TransportReplicationAction { protected TransportWriteAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - Supplier replicaRequest, String executor) { super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - 
indexNameExpressionResolver, request, replicaRequest, executor); + indexNameExpressionResolver, request, request, executor); } /** * Called on the primary with a reference to the {@linkplain IndexShard} to modify. */ - protected abstract WriteResult onPrimaryShard(Request request, IndexShard indexShard) throws Exception; + protected abstract WriteResult onPrimaryShard(Request request, IndexShard indexShard) throws Exception; /** * Called once per replica with a reference to the {@linkplain IndexShard} to modify. * * @return the translog location of the {@linkplain IndexShard} after the write was completed or null if no write occurred */ - protected abstract Translog.Location onReplicaShard(ReplicaRequest request, IndexShard indexShard); + protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); @Override protected final WritePrimaryResult shardOperationOnPrimary(Request request) throws Exception { IndexShard indexShard = indexShard(request); - WriteResult result = onPrimaryShard(request, indexShard); - return new WritePrimaryResult(request, result, indexShard); + WriteResult result = onPrimaryShard(request, indexShard); + return new WritePrimaryResult(request, result.getResponse(), result.getLocation(), indexShard); } @Override - protected final WriteReplicaResult shardOperationOnReplica(ReplicaRequest request) { + protected final WriteReplicaResult shardOperationOnReplica(Request request) { IndexShard indexShard = indexShard(request); Translog.Location location = onReplicaShard(request, indexShard); return new WriteReplicaResult(indexShard, request, location); @@ -91,7 +89,7 @@ public abstract class TransportWriteAction< /** * Fetch the IndexShard for the request. Protected so it can be mocked in tests. 
*/ - protected IndexShard indexShard(ReplicatedWriteRequest request) { + protected IndexShard indexShard(Request request) { final ShardId shardId = request.shardId(); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); return indexService.getShard(shardId.id()); @@ -100,13 +98,11 @@ public abstract class TransportWriteAction< /** * Simple result from a write action. Write actions have static method to return these so they can integrate with bulk. */ - public static class WriteResult, Response extends ReplicationResponse> { - private final ReplicaRequest replicaRequest; + public static class WriteResult { private final Response response; private final Translog.Location location; - public WriteResult(ReplicaRequest replicaRequest, Response response, @Nullable Location location) { - this.replicaRequest = replicaRequest; + public WriteResult(Response response, @Nullable Location location) { this.response = response; this.location = location; } @@ -118,10 +114,6 @@ public abstract class TransportWriteAction< public Translog.Location getLocation() { return location; } - - public ReplicaRequest getReplicaRequest() { - return replicaRequest; - } } /** @@ -131,15 +123,15 @@ public abstract class TransportWriteAction< boolean finishedAsyncActions; ActionListener listener = null; - public WritePrimaryResult(Request request, - WriteResult result, + public WritePrimaryResult(Request request, Response finalResponse, + @Nullable Translog.Location location, IndexShard indexShard) { - super(result.getReplicaRequest(), result.getResponse()); + super(request, finalResponse); /* * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the * refresh in parallel on the primary and on the replica. 
*/ - new AsyncAfterWriteAction(indexShard, request, result.getLocation(), this, logger).run(); + new AsyncAfterWriteAction(indexShard, request, location, this, logger).run(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java new file mode 100644 index 00000000000..cb9a6ab9f69 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java @@ -0,0 +1,138 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support.single.instance; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +/** + * + */ +public abstract class InstanceShardOperationRequest> extends ActionRequest + implements IndicesRequest { + + public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); + + protected TimeValue timeout = DEFAULT_TIMEOUT; + + protected String index; + // null means its not set, allows to explicitly direct a request to a specific shard + protected ShardId shardId = null; + + private String concreteIndex; + + protected InstanceShardOperationRequest() { + } + + public InstanceShardOperationRequest(String index) { + this.index = index; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (index == null) { + validationException = ValidateActions.addValidationError("index is missing", validationException); + } + return validationException; + } + + public String index() { + return index; + } + + @Override + public String[] indices() { + return new String[]{index}; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + + @SuppressWarnings("unchecked") + public final Request index(String index) { + this.index = index; + return (Request) this; + } + + public TimeValue timeout() { + return timeout; + } + + /** + * A timeout to wait if the index operation can't be 
performed immediately. Defaults to 1m. + */ + @SuppressWarnings("unchecked") + public final Request timeout(TimeValue timeout) { + this.timeout = timeout; + return (Request) this; + } + + /** + * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. + */ + public final Request timeout(String timeout) { + return timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout")); + } + + public String concreteIndex() { + return concreteIndex; + } + + void concreteIndex(String concreteIndex) { + this.concreteIndex = concreteIndex; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + index = in.readString(); + if (in.readBoolean()) { + shardId = ShardId.readShardId(in); + } else { + shardId = null; + } + timeout = new TimeValue(in); + concreteIndex = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(index); + out.writeOptionalStreamable(shardId); + timeout.writeTo(out); + out.writeOptionalString(concreteIndex); + } + +} + diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java new file mode 100644 index 00000000000..13266b9151d --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.single.instance; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.unit.TimeValue; + +/** + */ +public abstract class InstanceShardOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends InstanceShardOperationRequestBuilder> + extends ActionRequestBuilder { + + protected InstanceShardOperationRequestBuilder(ElasticsearchClient client, Action action, Request request) { + super(client, action, request); + } + + @SuppressWarnings("unchecked") + public final RequestBuilder setIndex(String index) { + request.index(index); + return (RequestBuilder) this; + } + + /** + * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. + */ + @SuppressWarnings("unchecked") + public final RequestBuilder setTimeout(TimeValue timeout) { + request.timeout(timeout); + return (RequestBuilder) this; + } + + /** + * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. 
+ */ + @SuppressWarnings("unchecked") + public final RequestBuilder setTimeout(String timeout) { + request.timeout(timeout); + return (RequestBuilder) this; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java new file mode 100644 index 00000000000..81da5ec9a86 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -0,0 +1,270 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support.single.instance; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.node.NodeClosedException; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportResponseHandler; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; + +import java.util.function.Supplier; + +/** + * + */ +public abstract class TransportInstanceSingleOperationAction, Response extends ActionResponse> + extends HandledTransportAction { + protected final ClusterService clusterService; + protected final TransportService transportService; + + final String executor; + final String shardActionName; + + protected TransportInstanceSingleOperationAction(Settings 
settings, String actionName, ThreadPool threadPool, + ClusterService clusterService, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { + super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); + this.clusterService = clusterService; + this.transportService = transportService; + this.executor = executor(); + this.shardActionName = actionName + "[s]"; + transportService.registerRequestHandler(shardActionName, request, executor, new ShardTransportHandler()); + } + + @Override + protected void doExecute(Request request, ActionListener listener) { + new AsyncSingleAction(request, listener).start(); + } + + protected abstract String executor(); + + protected abstract void shardOperation(Request request, ActionListener listener); + + protected abstract Response newResponse(); + + protected ClusterBlockException checkGlobalBlock(ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); + } + + protected ClusterBlockException checkRequestBlock(ClusterState state, Request request) { + return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.concreteIndex()); + } + + /** + * Resolves the request. Throws an exception if the request cannot be resolved. + */ + protected abstract void resolveRequest(ClusterState state, Request request); + + protected boolean retryOnFailure(Exception e) { + return false; + } + + protected TransportRequestOptions transportOptions() { + return TransportRequestOptions.EMPTY; + } + + /** + * Should return an iterator with a single shard! 
+ */ + protected abstract ShardIterator shards(ClusterState clusterState, Request request); + + class AsyncSingleAction { + + private final ActionListener listener; + private final Request request; + private volatile ClusterStateObserver observer; + private ShardIterator shardIt; + private DiscoveryNodes nodes; + + AsyncSingleAction(Request request, ActionListener listener) { + this.request = request; + this.listener = listener; + } + + public void start() { + this.observer = new ClusterStateObserver(clusterService, request.timeout(), logger, threadPool.getThreadContext()); + doStart(); + } + + protected void doStart() { + nodes = observer.observedState().nodes(); + try { + ClusterBlockException blockException = checkGlobalBlock(observer.observedState()); + if (blockException != null) { + if (blockException.retryable()) { + retry(blockException); + return; + } else { + throw blockException; + } + } + request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request).getName()); + resolveRequest(observer.observedState(), request); + blockException = checkRequestBlock(observer.observedState(), request); + if (blockException != null) { + if (blockException.retryable()) { + retry(blockException); + return; + } else { + throw blockException; + } + } + shardIt = shards(observer.observedState(), request); + } catch (Exception e) { + listener.onFailure(e); + return; + } + + // no shardIt, might be in the case between index gateway recovery and shardIt initialization + if (shardIt.size() == 0) { + retry(null); + return; + } + + // this transport only make sense with an iterator that returns a single shard routing (like primary) + assert shardIt.size() == 1; + + ShardRouting shard = shardIt.nextOrNull(); + assert shard != null; + + if (!shard.active()) { + retry(null); + return; + } + + request.shardId = shardIt.shardId(); + DiscoveryNode node = nodes.get(shard.currentNodeId()); + transportService.sendRequest(node, shardActionName, 
request, transportOptions(), new TransportResponseHandler() { + + @Override + public Response newInstance() { + return newResponse(); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public void handleResponse(Response response) { + listener.onResponse(response); + } + + @Override + public void handleException(TransportException exp) { + final Throwable cause = exp.unwrapCause(); + // if we got disconnected from the node, or the node / shard is not in the right state (being closed) + if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException || + retryOnFailure(exp)) { + retry((Exception) cause); + } else { + listener.onFailure(exp); + } + } + }); + } + + void retry(@Nullable final Exception failure) { + if (observer.isTimedOut()) { + // we running as a last attempt after a timeout has happened. don't retry + Exception listenFailure = failure; + if (listenFailure == null) { + if (shardIt == null) { + listenFailure = new UnavailableShardsException(request.concreteIndex(), -1, "Timeout waiting for [{}], request: {}", request.timeout(), actionName); + } else { + listenFailure = new UnavailableShardsException(shardIt.shardId(), "[{}] shardIt, [{}] active : Timeout waiting for [{}], request: {}", shardIt.size(), shardIt.sizeActive(), request.timeout(), actionName); + } + } + listener.onFailure(listenFailure); + return; + } + + observer.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + doStart(); + } + + @Override + public void onClusterServiceClose() { + listener.onFailure(new NodeClosedException(nodes.getLocalNode())); + } + + @Override + public void onTimeout(TimeValue timeout) { + // just to be on the safe side, see if we can start it now? 
+ doStart(); + } + }, request.timeout()); + } + } + + private class ShardTransportHandler implements TransportRequestHandler { + + @Override + public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + shardOperation(request, new ActionListener() { + @Override + public void onResponse(Response response) { + try { + channel.sendResponse(response); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn("failed to send response for get", inner); + } + } + }); + + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 1f3a97a25a2..e5322f51d50 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -34,17 +34,19 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.support.replication.TransportWriteAction; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import 
org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.action.support.TransportActions; +import org.elasticsearch.action.support.single.instance.TransportInstanceSingleOperationAction; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.PlainShardIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -52,52 +54,59 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Collections; import java.util.Map; import static org.elasticsearch.ExceptionsHelper.unwrapCause; /** */ -public class TransportUpdateAction extends TransportWriteAction { +public class TransportUpdateAction extends TransportInstanceSingleOperationAction { + private final TransportDeleteAction deleteAction; + private final TransportIndexAction 
indexAction; private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; private final UpdateHelper updateHelper; private final IndicesService indicesService; - private final MappingUpdatedAction mappingUpdatedAction; - private final boolean allowIdGeneration; @Inject public TransportUpdateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, - TransportCreateIndexAction createIndexAction, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, - AutoCreateIndex autoCreateIndex, ShardStateAction shardStateAction, - MappingUpdatedAction mappingUpdatedAction, ScriptService scriptService) { - super(settings, UpdateAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, UpdateRequest::new, UpdateReplicaRequest::new, ThreadPool.Names.INDEX); + TransportIndexAction indexAction, TransportDeleteAction deleteAction, TransportCreateIndexAction createIndexAction, + UpdateHelper updateHelper, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + IndicesService indicesService, AutoCreateIndex autoCreateIndex) { + super(settings, UpdateAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpdateRequest::new); + this.indexAction = indexAction; + this.deleteAction = deleteAction; this.createIndexAction = createIndexAction; - this.updateHelper = new UpdateHelper(scriptService, logger); + this.updateHelper = updateHelper; this.indicesService = indicesService; this.autoCreateIndex = autoCreateIndex; - this.mappingUpdatedAction = mappingUpdatedAction; - this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); } @Override - protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, UpdateRequest request) { - 
super.resolveRequest(metaData, indexMetaData, request); - resolveAndValidateRouting(metaData, indexMetaData.getIndex().getName(), request); - ShardId shardId = clusterService.operationRouting().shardId(clusterService.state(), - indexMetaData.getIndex().getName(), request.id(), request.routing()); - request.setShardId(shardId); + protected String executor() { + return ThreadPool.Names.INDEX; + } + + @Override + protected UpdateResponse newResponse() { + return new UpdateResponse(); + } + + @Override + protected boolean retryOnFailure(Exception e) { + return TransportActions.isShardNotAvailableException(e); + } + + @Override + protected void resolveRequest(ClusterState state, UpdateRequest request) { + resolveAndValidateRouting(state.metaData(), request.concreteIndex(), request); } public static void resolveAndValidateRouting(MetaData metaData, String concreteIndex, UpdateRequest request) { @@ -109,17 +118,13 @@ public class TransportUpdateAction extends TransportWriteAction listener) { + protected void doExecute(final UpdateRequest request, final ActionListener listener) { // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { - CreateIndexRequest createIndexRequest = new CreateIndexRequest(); - createIndexRequest.index(request.index()); - createIndexRequest.cause("auto(update api)"); - createIndexRequest.masterNodeTimeout(request.timeout()); - createIndexAction.execute(createIndexRequest, new ActionListener() { + createIndexAction.execute(new CreateIndexRequest().index(request.index()).cause("auto(update api)").masterNodeTimeout(request.timeout()), new ActionListener() { @Override public void onResponse(CreateIndexResponse result) { - innerExecute(task, request, listener); + innerExecute(request, listener); } @Override @@ -127,7 +132,7 @@ public class TransportUpdateAction extends TransportWriteAction listener) { - 
super.doExecute(task, request, listener); + private void innerExecute(final UpdateRequest request, final ActionListener listener) { + super.doExecute(request, listener); } @Override - protected WriteResult onPrimaryShard(UpdateRequest request, IndexShard indexShard) throws Exception { - ShardId shardId = request.shardId(); - final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - final IndexMetaData indexMetaData = indexService.getMetaData(); - return executeUpdateRequestOnPrimary(request, indexShard, indexMetaData, updateHelper, mappingUpdatedAction, allowIdGeneration); - } - - public static WriteResult executeUpdateRequestOnPrimary(UpdateRequest request, - IndexShard indexShard, - IndexMetaData indexMetaData, - UpdateHelper updateHelper, - MappingUpdatedAction mappingUpdatedAction, - boolean allowIdGeneration) - throws Exception { - int maxAttempts = request.retryOnConflict(); - for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) { - try { - return shardUpdateOperation(indexMetaData, indexShard, request, updateHelper, mappingUpdatedAction, allowIdGeneration); - } catch (Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - if (attemptCount == maxAttempts // bubble up exception when we run out of attempts - || (cause instanceof VersionConflictEngineException) == false) { // or when exception is not a version conflict - throw e; - } + protected ShardIterator shards(ClusterState clusterState, UpdateRequest request) { + if (request.getShardId() != null) { + return clusterState.routingTable().index(request.concreteIndex()).shard(request.getShardId().getId()).primaryShardIt(); + } + ShardIterator shardIterator = clusterService.operationRouting() + .indexShards(clusterState, request.concreteIndex(), request.id(), request.routing()); + ShardRouting shard; + while ((shard = shardIterator.nextOrNull()) != null) { + if (shard.primary()) { + return new PlainShardIterator(shardIterator.shardId(), 
Collections.singletonList(shard)); } } - throw new IllegalStateException("version conflict exception should bubble up on last attempt"); - + return new PlainShardIterator(shardIterator.shardId(), Collections.emptyList()); } - private static WriteResult shardUpdateOperation(IndexMetaData indexMetaData, - IndexShard indexShard, - UpdateRequest request, - UpdateHelper updateHelper, - MappingUpdatedAction mappingUpdatedAction, - boolean allowIdGeneration) - throws Exception { + @Override + protected void shardOperation(final UpdateRequest request, final ActionListener listener) { + shardOperation(request, listener, 0); + } + + protected void shardOperation(final UpdateRequest request, final ActionListener listener, final int retryCount) { + final ShardId shardId = request.getShardId(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.getId()); final UpdateHelper.Result result = updateHelper.prepare(request, indexShard); switch (result.getResponseResult()) { case CREATED: + IndexRequest upsertRequest = result.action(); + // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request + final BytesReference upsertSourceBytes = upsertRequest.source(); + indexAction.execute(upsertRequest, new ActionListener() { + @Override + public void onResponse(IndexResponse response) { + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); + if ((request.fetchSource() != null && request.fetchSource().fetchSource()) || + (request.fields() != null && request.fields().length > 0)) { + Tuple> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true); + update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), 
upsertSourceBytes)); + } else { + update.setGetResult(null); + } + update.setForcedRefresh(response.forcedRefresh()); + listener.onResponse(update); + } + + @Override + public void onFailure(Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof VersionConflictEngineException) { + if (retryCount < request.retryOnConflict()) { + logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]", + retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id()); + threadPool.executor(executor()).execute(new ActionRunnable(listener) { + @Override + protected void doRun() { + shardOperation(request, listener, retryCount + 1); + } + }); + return; + } + } + listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); + } + }); + break; case UPDATED: IndexRequest indexRequest = result.action(); - MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type()); - indexRequest.process(mappingMd, allowIdGeneration, indexMetaData.getIndex().getName()); - WriteResult indexResponseWriteResult = TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); - IndexResponse response = indexResponseWriteResult.getResponse(); - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request final BytesReference indexSourceBytes = indexRequest.source(); - if (result.getResponseResult() == DocWriteResponse.Result.CREATED) { - if ((request.fetchSource() != null && request.fetchSource().fetchSource()) || - (request.fields() != null && request.fields().length > 0)) { - Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceBytes, true); - 
update.setGetResult(updateHelper.extractGetResult(request, indexMetaData.getIndex().getName(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceBytes)); - } else { - update.setGetResult(null); + indexAction.execute(indexRequest, new ActionListener() { + @Override + public void onResponse(IndexResponse response) { + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); + update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); + update.setForcedRefresh(response.forcedRefresh()); + listener.onResponse(update); } - } else if (result.getResponseResult() == DocWriteResponse.Result.UPDATED) { - update.setGetResult(updateHelper.extractGetResult(request, indexMetaData.getIndex().getName(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); - } - update.setForcedRefresh(response.forcedRefresh()); - UpdateReplicaRequest updateReplicaRequest = new UpdateReplicaRequest(indexRequest); - updateReplicaRequest.setParentTask(request.getParentTask()); - updateReplicaRequest.setShardId(request.shardId()); - updateReplicaRequest.setRefreshPolicy(request.getRefreshPolicy()); - return new WriteResult<>(updateReplicaRequest, update, indexResponseWriteResult.getLocation()); + + @Override + public void onFailure(Exception e) { + final Throwable cause = unwrapCause(e); + if (cause instanceof VersionConflictEngineException) { + if (retryCount < request.retryOnConflict()) { + threadPool.executor(executor()).execute(new ActionRunnable(listener) { + @Override + protected void doRun() { + shardOperation(request, listener, retryCount + 1); + } + }); + return; + } + } + listener.onFailure(cause instanceof Exception ? 
(Exception) cause : new NotSerializableExceptionWrapper(cause)); + } + }); + break; case DELETED: DeleteRequest deleteRequest = result.action(); - WriteResult deleteResponseWriteResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - DeleteResponse deleteResponse = deleteResponseWriteResult.getResponse(); - UpdateResponse deleteUpdate = new UpdateResponse(deleteResponse.getShardInfo(), deleteResponse.getShardId(), deleteResponse.getType(), deleteResponse.getId(), deleteResponse.getVersion(), deleteResponse.getResult()); - deleteUpdate.setGetResult(updateHelper.extractGetResult(request, indexMetaData.getIndex().getName(), deleteResponse.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); - deleteUpdate.setForcedRefresh(deleteResponse.forcedRefresh()); - UpdateReplicaRequest deleteReplicaRequest = new UpdateReplicaRequest(deleteRequest); - deleteReplicaRequest.setParentTask(request.getParentTask()); - deleteReplicaRequest.setShardId(request.shardId()); - deleteReplicaRequest.setRefreshPolicy(request.getRefreshPolicy()); - return new WriteResult<>(deleteReplicaRequest, deleteUpdate, deleteResponseWriteResult.getLocation()); + deleteAction.execute(deleteRequest, new ActionListener() { + @Override + public void onResponse(DeleteResponse response) { + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); + update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); + update.setForcedRefresh(response.forcedRefresh()); + listener.onResponse(update); + } + + @Override + public void onFailure(Exception e) { + final Throwable cause = unwrapCause(e); + if (cause instanceof VersionConflictEngineException) { + if (retryCount < request.retryOnConflict()) { + 
threadPool.executor(executor()).execute(new ActionRunnable(listener) { + @Override + protected void doRun() { + shardOperation(request, listener, retryCount + 1); + } + }); + return; + } + } + listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); + } + }); + break; case NOOP: - UpdateResponse noopUpdate = result.action(); - indexShard.noopUpdate(request.type()); - return new WriteResult<>(null, noopUpdate, null); + UpdateResponse update = result.action(); + IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex()); + if (indexServiceOrNull != null) { + IndexShard shard = indexService.getShardOrNull(shardId.getId()); + if (shard != null) { + shard.noopUpdate(request.type()); + } + } + listener.onResponse(update); + break; default: throw new IllegalStateException("Illegal result " + result.getResponseResult()); } } - - @Override - protected Translog.Location onReplicaShard(UpdateReplicaRequest request, IndexShard indexShard) { - assert request.getRequest() != null; - final Translog.Location location; - switch (request.getRequest().opType()) { - case INDEX: - case CREATE: - location = TransportIndexAction.executeIndexRequestOnReplica(((IndexRequest) request.getRequest()), indexShard).getTranslogLocation(); - break; - case DELETE: - location = TransportDeleteAction.executeDeleteRequestOnReplica(((DeleteRequest) request.getRequest()), indexShard).getTranslogLocation(); - break; - default: - throw new IllegalStateException("unexpected opType [" + request.getRequest().opType().getLowercase() + "]"); - - } - return location; - } } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index c242f885f06..49206470532 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -19,7 +19,6 @@ package 
org.elasticsearch.action.update; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -28,8 +27,11 @@ import org.elasticsearch.client.Requests; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; @@ -61,14 +63,14 @@ import java.util.Map; /** * Helper for translating an update request to an index, delete request or update response. 
*/ -public class UpdateHelper { +public class UpdateHelper extends AbstractComponent { private final ScriptService scriptService; - private final Logger logger; - public UpdateHelper(ScriptService scriptService, Logger logger) { + @Inject + public UpdateHelper(Settings settings, ScriptService scriptService) { + super(settings); this.scriptService = scriptService; - this.logger = logger; } /** @@ -257,7 +259,7 @@ public class UpdateHelper { return ctx; } - private static TimeValue getTTLFromScriptContext(Map ctx) { + private TimeValue getTTLFromScriptContext(Map ctx) { Object fetchedTTL = ctx.get("_ttl"); if (fetchedTTL != null) { if (fetchedTTL instanceof Number) { diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java deleted file mode 100644 index 5f258a675c2..00000000000 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateReplicaRequest.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.update; - -import org.elasticsearch.action.DocumentRequest; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.VersionType; - -import java.io.IOException; - -/** Replica request for update operation holds translated (index/delete) requests */ -public class UpdateReplicaRequest extends DocumentRequest { - private DocumentRequest request; - - public UpdateReplicaRequest() { - } - - public UpdateReplicaRequest(DocumentRequest request) { - assert !(request instanceof UpdateReplicaRequest) : "underlying request must not be a update replica request"; - this.request = request; - this.index = request.index(); - setRefreshPolicy(request.getRefreshPolicy()); - setShardId(request.shardId()); - setParentTask(request.getParentTask()); - } - - public DocumentRequest getRequest() { - return request; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = DocumentRequest.readDocumentRequest(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - DocumentRequest.writeDocumentRequest(out, request); - } - - @Override - public String type() { - return request.type(); - } - - @Override - public String id() { - return request.id(); - } - - @Override - public UpdateReplicaRequest routing(String routing) { - throw new UnsupportedOperationException("setting routing is not supported"); - } - - @Override - public String routing() { - return request.routing(); - } - - @Override - public String parent() { - return request.parent(); - } - - @Override - public long version() { - return request.version(); - } - - @Override - public UpdateReplicaRequest version(long version) { 
- throw new UnsupportedOperationException("setting version is not supported"); - } - - @Override - public VersionType versionType() { - return request.versionType(); - } - - @Override - public UpdateReplicaRequest versionType(VersionType versionType) { - throw new UnsupportedOperationException("setting version type is not supported"); - } - - @Override - public OpType opType() { - return request.opType(); - } -} diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 80d3676e051..deca938fa6a 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -23,6 +23,9 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesArray; @@ -53,7 +56,10 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** */ -public class UpdateRequest extends DocumentRequest { +public class UpdateRequest extends InstanceShardOperationRequest + implements DocumentRequest, WriteRequest { + private static final DeprecationLogger DEPRECATION_LOGGER = + new DeprecationLogger(Loggers.getLogger(UpdateRequest.class)); private String type; private String id; @@ -91,7 +97,7 @@ public class UpdateRequest extends DocumentRequest { } public UpdateRequest(String index, String type, String id) { - this.index = index; + super(index); this.type = type; this.id = id; 
} @@ -489,6 +495,39 @@ public class UpdateRequest extends DocumentRequest { return OpType.UPDATE; } + @Override + public UpdateRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + public ActiveShardCount waitForActiveShards() { + return this.waitForActiveShards; + } + + /** + * Sets the number of shard copies that must be active before proceeding with the write. + * See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details. + */ + public UpdateRequest waitForActiveShards(ActiveShardCount waitForActiveShards) { + this.waitForActiveShards = waitForActiveShards; + return this; + } + + /** + * A shortcut for {@link #waitForActiveShards(ActiveShardCount)} where the numerical + * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)} + * to get the ActiveShardCount. + */ + public UpdateRequest waitForActiveShards(final int waitForActiveShards) { + return waitForActiveShards(ActiveShardCount.from(waitForActiveShards)); + } + /** * Sets the doc to use for updates when a script is not specified. 
*/ diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index e9b111f4df9..bbbc9bafd8f 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequest; -import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; +import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.DeprecationLogger; @@ -37,7 +37,7 @@ import org.elasticsearch.script.Script; import java.util.Map; -public class UpdateRequestBuilder extends ReplicationRequestBuilder +public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder implements WriteRequestBuilder { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class)); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index 83f347d6b98..eb1843dc7d9 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -22,6 +22,7 @@ package org.elasticsearch.indices; import org.elasticsearch.action.admin.indices.rollover.Condition; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; +import org.elasticsearch.action.update.UpdateHelper; import 
org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.inject.AbstractModule; @@ -181,6 +182,7 @@ public class IndicesModule extends AbstractModule { bind(SyncedFlushService.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton(); bind(IndicesTTLService.class).asEagerSingleton(); + bind(UpdateHelper.class).asEagerSingleton(); bind(MetaDataIndexUpgradeService.class).asEagerSingleton(); bind(NodeServicesProvider.class).asEagerSingleton(); } diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index 5c692668f26..d1d01610f18 100644 --- a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -222,7 +222,8 @@ public class IndicesRequestIT extends ESIntegTestCase { } public void testUpdate() { - String[] updateShardActions = new String[]{UpdateAction.NAME + "[p]", UpdateAction.NAME + "[r]"}; + //update action goes to the primary, index op gets executed locally, then replicated + String[] updateShardActions = new String[]{UpdateAction.NAME + "[s]", IndexAction.NAME + "[r]"}; interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); @@ -236,7 +237,8 @@ public class IndicesRequestIT extends ESIntegTestCase { } public void testUpdateUpsert() { - String[] updateShardActions = new String[]{UpdateAction.NAME + "[p]", UpdateAction.NAME + "[r]"}; + //update action goes to the primary, index op gets executed locally, then replicated + String[] updateShardActions = new String[]{UpdateAction.NAME + "[s]", IndexAction.NAME + "[r]"}; interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); @@ -249,7 +251,8 @@ public class IndicesRequestIT extends ESIntegTestCase { } public void testUpdateDelete() { - String[] 
updateShardActions = new String[]{UpdateAction.NAME + "[p]", UpdateAction.NAME + "[r]"}; + //update action goes to the primary, delete op gets executed locally, then replicated + String[] updateShardActions = new String[]{UpdateAction.NAME + "[s]", DeleteAction.NAME + "[r]"}; interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index d2070fb21db..a554ca53d99 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteResponse; @@ -129,21 +128,21 @@ public class TransportWriteActionTests extends ESTestCase { resultChecker.accept(listener.response, forcedRefresh); } - private class TestAction extends TransportWriteAction { + private class TestAction extends TransportWriteAction { protected TestAction() { super(Settings.EMPTY, "test", new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR), null, null, null, null, new ActionFilters(new HashSet<>()), - new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, TestRequest::new, ThreadPool.Names.SAME); + new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, ThreadPool.Names.SAME); } @Override - protected IndexShard indexShard(ReplicatedWriteRequest request) { + protected IndexShard indexShard(TestRequest request) { return indexShard; } @Override - 
protected WriteResult onPrimaryShard(TestRequest request, IndexShard indexShard) throws Exception { - return new WriteResult<>(request, new TestResponse(), location); + protected WriteResult onPrimaryShard(TestRequest request, IndexShard indexShard) throws Exception { + return new WriteResult<>(new TestResponse(), location); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java new file mode 100644 index 00000000000..1d736060568 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -0,0 +1,327 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support.single.instance; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import 
java.util.function.Supplier; + +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.hamcrest.core.IsEqual.equalTo; + +public class TransportInstanceSingleOperationActionTests extends ESTestCase { + + private static ThreadPool THREAD_POOL; + + private ClusterService clusterService; + private CapturingTransport transport; + private TransportService transportService; + + private TestTransportInstanceSingleOperationAction action; + + public static class Request extends InstanceShardOperationRequest { + public Request() { + } + } + + public static class Response extends ActionResponse { + public Response() { + } + } + + class TestTransportInstanceSingleOperationAction extends TransportInstanceSingleOperationAction { + private final Map shards = new HashMap<>(); + + public TestTransportInstanceSingleOperationAction(Settings settings, String actionName, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { + super(settings, actionName, THREAD_POOL, TransportInstanceSingleOperationActionTests.this.clusterService, transportService, actionFilters, indexNameExpressionResolver, request); + } + + public Map getResults() { + return shards; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected void shardOperation(Request request, ActionListener listener) { + throw new UnsupportedOperationException("Not implemented in test class"); + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void resolveRequest(ClusterState state, Request request) { + } + + @Override + protected ShardIterator shards(ClusterState clusterState, Request request) { + return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId.getId()).primaryShardIt(); + } + } + + class 
MyResolver extends IndexNameExpressionResolver { + public MyResolver() { + super(Settings.EMPTY); + } + + @Override + public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { + return request.indices(); + } + } + + @BeforeClass + public static void startThreadPool() { + THREAD_POOL = new TestThreadPool(TransportInstanceSingleOperationActionTests.class.getSimpleName()); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + transport = new CapturingTransport(); + clusterService = createClusterService(THREAD_POOL); + transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + transportService.start(); + transportService.acceptIncomingRequests(); + action = new TestTransportInstanceSingleOperationAction( + Settings.EMPTY, + "indices:admin/test", + transportService, + new ActionFilters(new HashSet()), + new MyResolver(), + Request::new + ); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + transportService.close(); + } + + @AfterClass + public static void destroyThreadPool() { + ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS); + // since static must set to null to be eligible for collection + THREAD_POOL = null; + } + + public void testGlobalBlock() { + Request request = new Request(); + PlainActionFuture listener = new PlainActionFuture<>(); + ClusterBlocks.Builder block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); + try { + action.new AsyncSingleAction(request, listener).start(); + listener.get(); + fail("expected ClusterBlockException"); + } catch (Exception e) { + if (ExceptionsHelper.unwrap(e, ClusterBlockException.class) == null) { + logger.info("expected ClusterBlockException but got ", e); + 
fail("expected ClusterBlockException"); + } + } + } + + public void testBasicRequestWorks() throws InterruptedException, ExecutionException, TimeoutException { + Request request = new Request().index("test"); + request.shardId = new ShardId("test", "_na_", 0); + PlainActionFuture listener = new PlainActionFuture<>(); + setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + action.new AsyncSingleAction(request, listener).start(); + assertThat(transport.capturedRequests().length, equalTo(1)); + transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); + listener.get(); + } + + public void testFailureWithoutRetry() throws Exception { + Request request = new Request().index("test"); + request.shardId = new ShardId("test", "_na_", 0); + PlainActionFuture listener = new PlainActionFuture<>(); + setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + + action.new AsyncSingleAction(request, listener).start(); + assertThat(transport.capturedRequests().length, equalTo(1)); + long requestId = transport.capturedRequests()[0].requestId; + transport.clear(); + // this should not trigger retry or anything and the listener should report exception immediately + transport.handleRemoteError(requestId, new TransportException("a generic transport exception", new Exception("generic test exception"))); + + try { + // result should return immediately + assertTrue(listener.isDone()); + listener.get(); + fail("this should fail with a transport exception"); + } catch (ExecutionException t) { + if (ExceptionsHelper.unwrap(t, TransportException.class) == null) { + logger.info("expected TransportException but got ", t); + fail("expected and TransportException"); + } + } + } + + public void testSuccessAfterRetryWithClusterStateUpdate() throws Exception { + Request request = new Request().index("test"); + request.shardId = new ShardId("test", "_na_", 0); + 
PlainActionFuture listener = new PlainActionFuture<>(); + boolean local = randomBoolean(); + setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.INITIALIZING)); + action.new AsyncSingleAction(request, listener).start(); + // this should fail because primary not initialized + assertThat(transport.capturedRequests().length, equalTo(0)); + setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); + // this time it should work + assertThat(transport.capturedRequests().length, equalTo(1)); + transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); + listener.get(); + } + + public void testSuccessAfterRetryWithExceptionFromTransport() throws Exception { + Request request = new Request().index("test"); + request.shardId = new ShardId("test", "_na_", 0); + PlainActionFuture listener = new PlainActionFuture<>(); + boolean local = randomBoolean(); + setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); + action.new AsyncSingleAction(request, listener).start(); + assertThat(transport.capturedRequests().length, equalTo(1)); + long requestId = transport.capturedRequests()[0].requestId; + transport.clear(); + DiscoveryNode node = clusterService.state().getNodes().getLocalNode(); + transport.handleLocalError(requestId, new ConnectTransportException(node, "test exception")); + // trigger cluster state observer + setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); + assertThat(transport.capturedRequests().length, equalTo(1)); + transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); + listener.get(); + } + + public void testRetryOfAnAlreadyTimedOutRequest() throws Exception { + Request request = new Request().index("test").timeout(new TimeValue(0, TimeUnit.MILLISECONDS)); + request.shardId = new ShardId("test", "_na_", 0); + PlainActionFuture listener 
= new PlainActionFuture<>(); + setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + action.new AsyncSingleAction(request, listener).start(); + assertThat(transport.capturedRequests().length, equalTo(1)); + long requestId = transport.capturedRequests()[0].requestId; + transport.clear(); + DiscoveryNode node = clusterService.state().getNodes().getLocalNode(); + transport.handleLocalError(requestId, new ConnectTransportException(node, "test exception")); + + // wait until the timeout was triggered and we actually tried to send for the second time + assertBusy(new Runnable() { + @Override + public void run() { + assertThat(transport.capturedRequests().length, equalTo(1)); + } + }); + + // let it fail the second time too + requestId = transport.capturedRequests()[0].requestId; + transport.handleLocalError(requestId, new ConnectTransportException(node, "test exception")); + try { + // result should return immediately + assertTrue(listener.isDone()); + listener.get(); + fail("this should fail with a transport exception"); + } catch (ExecutionException t) { + if (ExceptionsHelper.unwrap(t, ConnectTransportException.class) == null) { + logger.info("expected ConnectTransportException but got ", t); + fail("expected and ConnectTransportException"); + } + } + } + + public void testUnresolvableRequestDoesNotHang() throws InterruptedException, ExecutionException, TimeoutException { + action = new TestTransportInstanceSingleOperationAction( + Settings.EMPTY, + "indices:admin/test_unresolvable", + transportService, + new ActionFilters(new HashSet<>()), + new MyResolver(), + Request::new + ) { + @Override + protected void resolveRequest(ClusterState state, Request request) { + throw new IllegalStateException("request cannot be resolved"); + } + }; + Request request = new Request().index("test"); + request.shardId = new ShardId("test", "_na_", 0); + PlainActionFuture listener = new PlainActionFuture<>(); + 
setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + action.new AsyncSingleAction(request, listener).start(); + assertThat(transport.capturedRequests().length, equalTo(0)); + try { + listener.get(); + } catch (Exception e) { + if (ExceptionsHelper.unwrap(e, IllegalStateException.class) == null) { + logger.info("expected IllegalStateException but got ", e); + fail("expected and IllegalStateException"); + } + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index d4291464fdb..cb27a527f63 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -172,8 +172,9 @@ public class UpdateRequestTests extends ESTestCase { // Related to issue 3256 public void testUpdateRequestWithTTL() throws Exception { TimeValue providedTTLValue = TimeValue.parseTimeValue(randomTimeValue(), null, "ttl"); + Settings settings = settings(Version.CURRENT).build(); - UpdateHelper updateHelper = new UpdateHelper(null, logger); + UpdateHelper updateHelper = new UpdateHelper(settings, null); // We just upsert one document with ttl IndexRequest indexRequest = new IndexRequest("test", "type1", "1") diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 67f0b4bf4a6..6e200c4756a 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -366,8 +366,8 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase @Override protected PrimaryResult performOnPrimary(IndexShard primary, IndexRequest 
request) throws Exception { - TransportWriteAction.WriteResult result = - TransportIndexAction.executeIndexRequestOnPrimary(request, primary, null); + TransportWriteAction.WriteResult result = TransportIndexAction.executeIndexRequestOnPrimary(request, primary, + null); request.primaryTerm(primary.getPrimaryTerm()); TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.getLocation(), logger); return new PrimaryResult(request, result.getResponse()); diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 7972bc39f37..ff4c4c657d7 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -162,8 +162,8 @@ the request was ignored. -------------------------------------------------- { "_shards": { - "total": 1, - "successful": 1, + "total": 0, + "successful": 0, "failed": 0 }, "_index": "test", From 9d48248a66adeca3bd1ecd8476bd41b79f5fd6cb Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Fri, 7 Oct 2016 17:56:35 -0400 Subject: [PATCH 14/53] remove redundant final qualifier --- .../src/main/java/org/elasticsearch/action/DocumentRequest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java index f4c88e159c7..fb9d8a27661 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java @@ -166,7 +166,7 @@ public interface DocumentRequest extends IndicesRequest { /** read a document write (index/delete/update) request */ static DocumentRequest readDocumentRequest(StreamInput in) throws IOException { byte type = in.readByte(); - final DocumentRequest documentRequest; + DocumentRequest documentRequest; if (type == 0) { IndexRequest indexRequest = new IndexRequest(); indexRequest.readFrom(in); From fe50db2e8d57ce588fc80eca7413955a8ff63a76 Mon Sep 17 
00:00:00 2001 From: Areek Zillur Date: Fri, 7 Oct 2016 18:28:51 -0400 Subject: [PATCH 15/53] fix bug in update operation in shard bulk execution --- .../action/bulk/TransportShardBulkAction.java | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 38417058e6a..8660c4eac26 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -233,14 +233,22 @@ public class TransportShardBulkAction extends TransportWriteAction writeResult = TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); BytesReference indexSourceAsBytes = indexRequest.source(); IndexResponse indexResponse = writeResult.getResponse(); - UpdateResponse writeUpdateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult()); - if (updateRequest.fields() != null && updateRequest.fields().length > 0) { - Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); - writeUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); + UpdateResponse update = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult()); + if (translate.getResponseResult() == DocWriteResponse.Result.CREATED) { + if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) || + (updateRequest.fields() != null && updateRequest.fields().length > 0)) { + Tuple> 
sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); + update.setGetResult(updateHelper.extractGetResult(updateRequest, updateRequest.concreteIndex(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); + } else { + update.setGetResult(null); + } + } else { + assert translate.getResponseResult() == DocWriteResponse.Result.UPDATED; + update.setGetResult(updateHelper.extractGetResult(updateRequest, updateRequest.concreteIndex(), indexResponse.getVersion(), translate.updatedSourceAsMap(), translate.updateSourceContentType(), indexSourceAsBytes)); } // Replace the update request to the translated index request to execute on the replica. request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest); - return new WriteResult<>(writeUpdateResponse, writeResult.getLocation()); + return new WriteResult<>(update, writeResult.getLocation()); case DELETED: DeleteRequest deleteRequest = translate.action(); WriteResult deleteResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); From 225a04b2ccd562ad9fcbb4e9e39b06d7d174cc21 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Tue, 11 Oct 2016 14:21:09 -0400 Subject: [PATCH 16/53] fix update operation in bulk execution --- .../action/bulk/TransportShardBulkAction.java | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 8660c4eac26..6b4ffc56b40 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -234,17 +234,10 @@ public class TransportShardBulkAction extends TransportWriteAction 0)) { - Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); - 
update.setGetResult(updateHelper.extractGetResult(updateRequest, updateRequest.concreteIndex(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); - } else { - update.setGetResult(null); - } - } else { - assert translate.getResponseResult() == DocWriteResponse.Result.UPDATED; - update.setGetResult(updateHelper.extractGetResult(updateRequest, updateRequest.concreteIndex(), indexResponse.getVersion(), translate.updatedSourceAsMap(), translate.updateSourceContentType(), indexSourceAsBytes)); + if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) || + (updateRequest.fields() != null && updateRequest.fields().length > 0)) { + Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); + update.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); } // Replace the update request to the translated index request to execute on the replica. 
request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest); From 661067d160e9ace12a8bbe3b12a783dd97bf78e6 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Tue, 11 Oct 2016 14:30:38 -0400 Subject: [PATCH 17/53] change DocumentRequest to DocumentRequest for readibility --- .../action/bulk/BulkItemRequest.java | 6 ++--- .../action/bulk/BulkProcessor.java | 10 ++++----- .../action/bulk/BulkRequest.java | 22 +++++++++---------- .../action/bulk/TransportBulkAction.java | 10 ++++----- .../action/bulk/TransportShardBulkAction.java | 8 +++---- .../action/ingest/IngestActionFilter.java | 6 ++--- .../ingest/PipelineExecutionService.java | 4 ++-- .../action/bulk/BulkRequestTests.java | 2 +- 8 files changed, 34 insertions(+), 34 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 079d4efe9bf..8c7786cb2ad 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -35,7 +35,7 @@ import java.io.IOException; public class BulkItemRequest implements Streamable { private int id; - private DocumentRequest request; + private DocumentRequest request; private volatile BulkItemResponse primaryResponse; private volatile boolean ignoreOnReplica; @@ -43,7 +43,7 @@ public class BulkItemRequest implements Streamable { } - public BulkItemRequest(int id, DocumentRequest request) { + public BulkItemRequest(int id, DocumentRequest request) { this.id = id; this.request = request; } @@ -52,7 +52,7 @@ public class BulkItemRequest implements Streamable { return id; } - public DocumentRequest request() { + public DocumentRequest request() { return request; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index f32bfaa775c..55347b2da13 
100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable { * (for example, if no id is provided, one will be generated, or usage of the create flag). */ public BulkProcessor add(IndexRequest request) { - return add((DocumentRequest) request); + return add((DocumentRequest) request); } /** * Adds an {@link DeleteRequest} to the list of actions to execute. */ public BulkProcessor add(DeleteRequest request) { - return add((DocumentRequest) request); + return add((DocumentRequest) request); } /** * Adds either a delete or an index request. */ - public BulkProcessor add(DocumentRequest request) { + public BulkProcessor add(DocumentRequest request) { return add(request, null); } - public BulkProcessor add(DocumentRequest request, @Nullable Object payload) { + public BulkProcessor add(DocumentRequest request, @Nullable Object payload) { internalAdd(request, payload); return this; } @@ -282,7 +282,7 @@ public class BulkProcessor implements Closeable { } } - private synchronized void internalAdd(DocumentRequest request, @Nullable Object payload) { + private synchronized void internalAdd(DocumentRequest request, @Nullable Object payload) { ensureOpen(); bulkRequest.add(request, payload); executeIfNeeded(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index dc72407cf42..292ecdd33e7 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -72,7 +72,7 @@ public class BulkRequest extends ActionRequest implements Composite * {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare * the one with the least casts. 
*/ - final List> requests = new ArrayList<>(); + final List requests = new ArrayList<>(); List payloads = null; protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; @@ -87,14 +87,14 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(DocumentRequest... requests) { - for (DocumentRequest request : requests) { + public BulkRequest add(DocumentRequest... requests) { + for (DocumentRequest request : requests) { add(request, null); } return this; } - public BulkRequest add(DocumentRequest request) { + public BulkRequest add(DocumentRequest request) { return add(request, null); } @@ -104,7 +104,7 @@ public class BulkRequest extends ActionRequest implements Composite * @param payload Optional payload * @return the current bulk request */ - public BulkRequest add(DocumentRequest request, @Nullable Object payload) { + public BulkRequest add(DocumentRequest request, @Nullable Object payload) { if (request instanceof IndexRequest) { add((IndexRequest) request, payload); } else if (request instanceof DeleteRequest) { @@ -120,8 +120,8 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(Iterable> requests) { - for (DocumentRequest request : requests) { + public BulkRequest add(Iterable requests) { + for (DocumentRequest request : requests) { add(request); } return this; @@ -207,7 +207,7 @@ public class BulkRequest extends ActionRequest implements Composite /** * The list of requests in this bulk request. */ - public List> requests() { + public List requests() { return this.requests; } @@ -508,7 +508,7 @@ public class BulkRequest extends ActionRequest implements Composite * @return Whether this bulk request contains index request with an ingest pipeline enabled. 
*/ public boolean hasIndexRequestsWithPipelines() { - for (DocumentRequest actionRequest : requests) { + for (DocumentRequest actionRequest : requests) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { @@ -526,7 +526,7 @@ public class BulkRequest extends ActionRequest implements Composite if (requests.isEmpty()) { validationException = addValidationError("no requests added", validationException); } - for (DocumentRequest request : requests) { + for (DocumentRequest request : requests) { // We first check if refresh has been set if (((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { validationException = addValidationError( @@ -561,7 +561,7 @@ public class BulkRequest extends ActionRequest implements Composite super.writeTo(out); waitForActiveShards.writeTo(out); out.writeVInt(requests.size()); - for (DocumentRequest request : requests) { + for (DocumentRequest request : requests) { DocumentRequest.writeDocumentRequest(out, request); } refreshPolicy.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index f7861d1e093..48edb528fe1 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -145,7 +145,7 @@ public class TransportBulkAction extends HandledTransportAction request = bulkRequest.requests.get(i); + DocumentRequest request = bulkRequest.requests.get(i); if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) { bulkRequest.requests.set(i, null); } @@ -180,7 +180,7 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, DocumentRequest request, String index, Exception e) { + private boolean setResponseFailureIfIndexMatches(AtomicArray 
responses, int idx, DocumentRequest request, String index, Exception e) { if (index.equals(request.index())) { responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e))); return true; @@ -211,7 +211,7 @@ public class TransportBulkAction extends HandledTransportAction documentRequest = bulkRequest.requests.get(i); + DocumentRequest documentRequest = bulkRequest.requests.get(i); //the request can only be null because we set it to null in the previous step, so it gets ignored if (documentRequest == null) { continue; @@ -253,7 +253,7 @@ public class TransportBulkAction extends HandledTransportAction Operations mapping Map> requestsByShard = new HashMap<>(); for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocumentRequest request = bulkRequest.requests.get(i); + DocumentRequest request = bulkRequest.requests.get(i); if (request == null) { continue; } @@ -300,7 +300,7 @@ public class TransportBulkAction extends HandledTransportAction documentRequest = request.request(); + DocumentRequest documentRequest = request.request(); responses.set(request.id(), new BulkItemResponse(request.id(), documentRequest.opType(), new BulkItemResponse.Failure(indexName, documentRequest.type(), documentRequest.id(), e))); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 6b4ffc56b40..338e7db4cb7 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -147,14 +147,14 @@ public class TransportShardBulkAction extends TransportWriteAction documentRequest = request.items()[j].request(); + DocumentRequest documentRequest = request.items()[j].request(); documentRequest.version(preVersions[j]); documentRequest.versionType(preVersionTypes[j]); } throw 
(ElasticsearchException) e; } BulkItemRequest item = request.items()[requestIndex]; - DocumentRequest documentRequest = item.request(); + DocumentRequest documentRequest = item.request(); if (isConflictException(e)) { logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", request.shardId(), documentRequest.opType().getLowercase(), request), e); @@ -179,7 +179,7 @@ public class TransportShardBulkAction extends TransportWriteAction innerExecuteBulkItemRequest(IndexMetaData metaData, IndexShard indexShard, BulkShardRequest request, int requestIndex) throws Exception { - DocumentRequest itemRequest = request.items()[requestIndex].request(); + DocumentRequest itemRequest = request.items()[requestIndex].request(); switch (itemRequest.opType()) { case CREATE: case INDEX: @@ -268,7 +268,7 @@ public class TransportShardBulkAction extends TransportWriteAction documentRequest = item.request(); + DocumentRequest documentRequest = item.request(); final Engine.Operation operation; try { switch (documentRequest.opType()) { diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java index 70a117bf1f7..31801bac85a 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java @@ -135,7 +135,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio return Integer.MAX_VALUE; } - static final class BulkRequestModifier implements Iterator> { + static final class BulkRequestModifier implements Iterator { final BulkRequest bulkRequest; final Set failedSlots; @@ -151,7 +151,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio } @Override - public DocumentRequest next() { + public DocumentRequest next() { return bulkRequest.requests().get(++currentSlot); } @@ -172,7 +172,7 @@ 
public final class IngestActionFilter extends AbstractComponent implements Actio int slot = 0; originalSlots = new int[bulkRequest.requests().size() - failedSlots.size()]; for (int i = 0; i < bulkRequest.requests().size(); i++) { - DocumentRequest request = bulkRequest.requests().get(i); + DocumentRequest request = bulkRequest.requests().get(i); if (failedSlots.contains(i) == false) { modifiedBulkRequest.add(request); originalSlots[slot++] = i; diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 57eb7afcb5a..1e11dbf78f2 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -68,7 +68,7 @@ public class PipelineExecutionService implements ClusterStateListener { }); } - public void executeBulkRequest(Iterable> actionRequests, + public void executeBulkRequest(Iterable actionRequests, BiConsumer itemFailureHandler, Consumer completionHandler) { threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() { @@ -80,7 +80,7 @@ public class PipelineExecutionService implements ClusterStateListener { @Override protected void doRun() throws Exception { - for (DocumentRequest actionRequest : actionRequests) { + for (DocumentRequest actionRequest : actionRequests) { if ((actionRequest instanceof IndexRequest)) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index 230373f7415..5d2145ddc3c 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -113,7 +113,7 @@ public class BulkRequestTests extends ESTestCase { public 
void testBulkAddIterable() { BulkRequest bulkRequest = Requests.bulkRequest(); - List> requests = new ArrayList<>(); + List requests = new ArrayList<>(); requests.add(new IndexRequest("test", "test", "id").source("field", "value")); requests.add(new UpdateRequest("test", "test", "id").doc("field", "value")); requests.add(new DeleteRequest("test", "test", "id")); From 0e8b6532ecc9e38c689b64ebb178271085e69c80 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Tue, 11 Oct 2016 16:00:10 -0400 Subject: [PATCH 18/53] rename DocumentRequest to DocWriteRequest --- .../noop/action/bulk/RestNoopBulkAction.java | 4 +- .../action/bulk/TransportNoopBulkAction.java | 4 +- ...umentRequest.java => DocWriteRequest.java} | 16 +++---- .../action/bulk/BulkItemRequest.java | 15 +++--- .../action/bulk/BulkItemResponse.java | 2 +- .../action/bulk/BulkProcessor.java | 12 ++--- .../action/bulk/BulkRequest.java | 28 +++++------ .../action/bulk/TransportBulkAction.java | 46 +++++++++---------- .../action/bulk/TransportShardBulkAction.java | 32 ++++++------- .../action/delete/DeleteRequest.java | 4 +- .../action/index/IndexRequest.java | 4 +- .../action/index/IndexRequestBuilder.java | 4 +- .../action/ingest/IngestActionFilter.java | 8 ++-- .../action/update/UpdateRequest.java | 4 +- .../org/elasticsearch/index/mapper/Uid.java | 2 - .../ingest/PipelineExecutionService.java | 6 +-- .../action/bulk/BulkRequestTests.java | 4 +- .../action/bulk/BulkWithUpdatesIT.java | 2 +- .../elasticsearch/action/bulk/RetryTests.java | 6 +-- .../action/index/IndexRequestTests.java | 10 ++-- .../ingest/BulkRequestModifierTests.java | 4 +- .../ingest/IngestActionFilterTests.java | 6 +-- .../document/DocumentActionsIT.java | 2 +- .../ingest/PipelineExecutionServiceTests.java | 4 +- .../routing/SimpleRoutingIT.java | 9 ++-- .../versioning/SimpleVersioningIT.java | 7 ++- .../AbstractAsyncBulkIndexByScrollAction.java | 6 +-- .../reindex/AsyncBulkByScrollActionTests.java | 17 ++++--- 
.../index/reindex/ReindexFailureTests.java | 2 +- .../index/reindex/ReindexVersioningTests.java | 2 +- 30 files changed, 130 insertions(+), 142 deletions(-) rename core/src/main/java/org/elasticsearch/action/{DocumentRequest.java => DocWriteRequest.java} (93%) diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java index 466821824a5..06082ed7d29 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.plugin.noop.action.bulk; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; @@ -85,7 +85,7 @@ public class RestNoopBulkAction extends BaseRestHandler { } private static class BulkRestBuilderListener extends RestBuilderListener { - private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentRequest.OpType.UPDATE, + private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); private final RestRequest request; diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java index 931e6724462..2a5efee1881 100644 --- 
a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.plugin.noop.action.bulk; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -35,7 +35,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class TransportNoopBulkAction extends HandledTransportAction { - private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentRequest.OpType.UPDATE, + private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); @Inject diff --git a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocWriteRequest.java similarity index 93% rename from core/src/main/java/org/elasticsearch/action/DocumentRequest.java rename to core/src/main/java/org/elasticsearch/action/DocWriteRequest.java index fb9d8a27661..09db7089ff6 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -33,7 +33,7 @@ import java.util.Locale; * Generic interface to group ActionRequest, which perform writes to a single document * Action requests implementing this can be part of {@link org.elasticsearch.action.bulk.BulkRequest} */ -public interface DocumentRequest extends IndicesRequest { 
+public interface DocWriteRequest extends IndicesRequest { /** * Get the index that this request operates on @@ -164,29 +164,29 @@ public interface DocumentRequest extends IndicesRequest { } /** read a document write (index/delete/update) request */ - static DocumentRequest readDocumentRequest(StreamInput in) throws IOException { + static DocWriteRequest readDocumentRequest(StreamInput in) throws IOException { byte type = in.readByte(); - DocumentRequest documentRequest; + DocWriteRequest docWriteRequest; if (type == 0) { IndexRequest indexRequest = new IndexRequest(); indexRequest.readFrom(in); - documentRequest = indexRequest; + docWriteRequest = indexRequest; } else if (type == 1) { DeleteRequest deleteRequest = new DeleteRequest(); deleteRequest.readFrom(in); - documentRequest = deleteRequest; + docWriteRequest = deleteRequest; } else if (type == 2) { UpdateRequest updateRequest = new UpdateRequest(); updateRequest.readFrom(in); - documentRequest = updateRequest; + docWriteRequest = updateRequest; } else { throw new IllegalStateException("invalid request type [" + type+ " ]"); } - return documentRequest; + return docWriteRequest; } /** write a document write (index/delete/update) request*/ - static void writeDocumentRequest(StreamOutput out, DocumentRequest request) throws IOException { + static void writeDocumentRequest(StreamOutput out, DocWriteRequest request) throws IOException { if (request instanceof IndexRequest) { out.writeByte((byte) 0); ((IndexRequest) request).writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 8c7786cb2ad..07d43342b9c 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -19,10 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.DocumentRequest; -import 
org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -35,7 +32,7 @@ import java.io.IOException; public class BulkItemRequest implements Streamable { private int id; - private DocumentRequest request; + private DocWriteRequest request; private volatile BulkItemResponse primaryResponse; private volatile boolean ignoreOnReplica; @@ -43,7 +40,7 @@ public class BulkItemRequest implements Streamable { } - public BulkItemRequest(int id, DocumentRequest request) { + public BulkItemRequest(int id, DocWriteRequest request) { this.id = id; this.request = request; } @@ -52,7 +49,7 @@ public class BulkItemRequest implements Streamable { return id; } - public DocumentRequest request() { + public DocWriteRequest request() { return request; } @@ -89,7 +86,7 @@ public class BulkItemRequest implements Streamable { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - request = DocumentRequest.readDocumentRequest(in); + request = DocWriteRequest.readDocumentRequest(in); if (in.readBoolean()) { primaryResponse = BulkItemResponse.readBulkItem(in); } @@ -99,7 +96,7 @@ public class BulkItemRequest implements Streamable { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - DocumentRequest.writeDocumentRequest(out, request); + DocWriteRequest.writeDocumentRequest(out, request); out.writeOptionalStreamable(primaryResponse); out.writeBoolean(ignoreOnReplica); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 9f0714784bc..2a1c3a1e35a 100644 --- 
a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -23,7 +23,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest.OpType; +import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateResponse; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 55347b2da13..6dacb21b239 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Client; @@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable { * (for example, if no id is provided, one will be generated, or usage of the create flag). */ public BulkProcessor add(IndexRequest request) { - return add((DocumentRequest) request); + return add((DocWriteRequest) request); } /** * Adds an {@link DeleteRequest} to the list of actions to execute. */ public BulkProcessor add(DeleteRequest request) { - return add((DocumentRequest) request); + return add((DocWriteRequest) request); } /** * Adds either a delete or an index request. 
*/ - public BulkProcessor add(DocumentRequest request) { + public BulkProcessor add(DocWriteRequest request) { return add(request, null); } - public BulkProcessor add(DocumentRequest request, @Nullable Object payload) { + public BulkProcessor add(DocWriteRequest request, @Nullable Object payload) { internalAdd(request, payload); return this; } @@ -282,7 +282,7 @@ public class BulkProcessor implements Closeable { } } - private synchronized void internalAdd(DocumentRequest request, @Nullable Object payload) { + private synchronized void internalAdd(DocWriteRequest request, @Nullable Object payload) { ensureOpen(); bulkRequest.add(request, payload); executeIfNeeded(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 292ecdd33e7..39102913262 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -72,7 +72,7 @@ public class BulkRequest extends ActionRequest implements Composite * {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare * the one with the least casts. 
*/ - final List requests = new ArrayList<>(); + final List requests = new ArrayList<>(); List payloads = null; protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; @@ -87,14 +87,14 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(DocumentRequest... requests) { - for (DocumentRequest request : requests) { + public BulkRequest add(DocWriteRequest... requests) { + for (DocWriteRequest request : requests) { add(request, null); } return this; } - public BulkRequest add(DocumentRequest request) { + public BulkRequest add(DocWriteRequest request) { return add(request, null); } @@ -104,7 +104,7 @@ public class BulkRequest extends ActionRequest implements Composite * @param payload Optional payload * @return the current bulk request */ - public BulkRequest add(DocumentRequest request, @Nullable Object payload) { + public BulkRequest add(DocWriteRequest request, @Nullable Object payload) { if (request instanceof IndexRequest) { add((IndexRequest) request, payload); } else if (request instanceof DeleteRequest) { @@ -120,8 +120,8 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(Iterable requests) { - for (DocumentRequest request : requests) { + public BulkRequest add(Iterable requests) { + for (DocWriteRequest request : requests) { add(request); } return this; @@ -207,7 +207,7 @@ public class BulkRequest extends ActionRequest implements Composite /** * The list of requests in this bulk request. */ - public List requests() { + public List requests() { return this.requests; } @@ -508,7 +508,7 @@ public class BulkRequest extends ActionRequest implements Composite * @return Whether this bulk request contains index request with an ingest pipeline enabled. 
*/ public boolean hasIndexRequestsWithPipelines() { - for (DocumentRequest actionRequest : requests) { + for (DocWriteRequest actionRequest : requests) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { @@ -526,7 +526,7 @@ public class BulkRequest extends ActionRequest implements Composite if (requests.isEmpty()) { validationException = addValidationError("no requests added", validationException); } - for (DocumentRequest request : requests) { + for (DocWriteRequest request : requests) { // We first check if refresh has been set if (((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { validationException = addValidationError( @@ -550,7 +550,7 @@ public class BulkRequest extends ActionRequest implements Composite waitForActiveShards = ActiveShardCount.readFrom(in); int size = in.readVInt(); for (int i = 0; i < size; i++) { - requests.add(DocumentRequest.readDocumentRequest(in)); + requests.add(DocWriteRequest.readDocumentRequest(in)); } refreshPolicy = RefreshPolicy.readFrom(in); timeout = new TimeValue(in); @@ -561,8 +561,8 @@ public class BulkRequest extends ActionRequest implements Composite super.writeTo(out); waitForActiveShards.writeTo(out); out.writeVInt(requests.size()); - for (DocumentRequest request : requests) { - DocumentRequest.writeDocumentRequest(out, request); + for (DocWriteRequest request : requests) { + DocWriteRequest.writeDocumentRequest(out, request); } refreshPolicy.writeTo(out); timeout.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 48edb528fe1..854b2fcf892 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.bulk; import 
org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -118,7 +118,7 @@ public class TransportBulkAction extends HandledTransportAction autoCreateIndices = bulkRequest.requests.stream() - .map(DocumentRequest::index) + .map(DocWriteRequest::index) .collect(Collectors.toSet()); final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size()); ClusterState state = clusterService.state(); @@ -145,7 +145,7 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, DocumentRequest request, String index, Exception e) { + private boolean setResponseFailureIfIndexMatches(AtomicArray responses, int idx, DocWriteRequest request, String index, Exception e) { if (index.equals(request.index())) { responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e))); return true; @@ -211,20 +211,20 @@ public class TransportBulkAction extends HandledTransportAction Operations mapping Map> requestsByShard = new HashMap<>(); for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocumentRequest request = bulkRequest.requests.get(i); + DocWriteRequest request = bulkRequest.requests.get(i); if (request == null) { continue; } @@ -300,9 +300,9 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, - final ConcreteIndices concreteIndices, - final MetaData metaData) { + private boolean addFailureIfIndexIsUnavailable(DocWriteRequest request, BulkRequest bulkRequest, AtomicArray responses, int idx, + final ConcreteIndices concreteIndices, + final MetaData 
metaData) { Index concreteIndex = concreteIndices.getConcreteIndex(request.index()); Exception unavailableException = null; if (concreteIndex == null) { @@ -362,7 +362,7 @@ public class TransportBulkAction extends HandledTransportAction writeResult = innerExecuteBulkItemRequest(metaData, indexShard, request, requestIndex); @@ -147,20 +147,20 @@ public class TransportShardBulkAction extends TransportWriteAction) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), documentRequest.opType().getLowercase(), request), e); + request.shardId(), docWriteRequest.opType().getLowercase(), request), e); } else { logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), documentRequest.opType().getLowercase(), request), e); + request.shardId(), docWriteRequest.opType().getLowercase(), request), e); } // if its a conflict failure, and we already executed the request on a primary (and we execute it // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) @@ -168,8 +168,8 @@ public class TransportShardBulkAction extends TransportWriteAction innerExecuteBulkItemRequest(IndexMetaData metaData, IndexShard indexShard, BulkShardRequest request, int requestIndex) throws Exception { - DocumentRequest itemRequest = request.items()[requestIndex].request(); + DocWriteRequest itemRequest = request.items()[requestIndex].request(); switch (itemRequest.opType()) { case CREATE: case INDEX: @@ -268,19 +268,19 @@ public class TransportShardBulkAction extends TransportWriteAction implements DocumentRequest { +public class DeleteRequest extends ReplicatedWriteRequest implements DocWriteRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index cce0f6c8eef..509f393b005 100644 --- 
a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -21,7 +21,7 @@ package org.elasticsearch.action.index; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.TimestampParsingException; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; @@ -67,7 +67,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @see org.elasticsearch.client.Requests#indexRequest(String) * @see org.elasticsearch.client.Client#index(IndexRequest) */ -public class IndexRequest extends ReplicatedWriteRequest implements DocumentRequest { +public class IndexRequest extends ReplicatedWriteRequest implements DocWriteRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index a9d8bcaa56b..310ef3fb928 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.index; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -201,7 +201,7 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder { + static final class BulkRequestModifier implements Iterator { final BulkRequest bulkRequest; final Set failedSlots; @@ -151,7 +151,7 @@ public 
final class IngestActionFilter extends AbstractComponent implements Actio } @Override - public DocumentRequest next() { + public DocWriteRequest next() { return bulkRequest.requests().get(++currentSlot); } @@ -172,7 +172,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio int slot = 0; originalSlots = new int[bulkRequest.requests().size() - failedSlots.size()]; for (int i = 0; i < bulkRequest.requests().size(); i++) { - DocumentRequest request = bulkRequest.requests().get(i); + DocWriteRequest request = bulkRequest.requests().get(i); if (failedSlots.contains(i) == false) { modifiedBulkRequest.add(request); originalSlots[slot++] = i; diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index deca938fa6a..2f82aff1b4f 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteRequest; @@ -57,7 +57,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** */ public class UpdateRequest extends InstanceShardOperationRequest - implements DocumentRequest, WriteRequest { + implements DocWriteRequest, WriteRequest { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(UpdateRequest.class)); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Uid.java b/core/src/main/java/org/elasticsearch/index/mapper/Uid.java index 2a8938b4ab7..344c8dc0cc0 100644 --- 
a/core/src/main/java/org/elasticsearch/index/mapper/Uid.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Uid.java @@ -21,12 +21,10 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.common.lucene.BytesRefs; import java.util.Collection; import java.util.Collections; -import java.util.List; /** * diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 1e11dbf78f2..6c701e59c90 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -19,7 +19,7 @@ package org.elasticsearch.ingest; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; @@ -68,7 +68,7 @@ public class PipelineExecutionService implements ClusterStateListener { }); } - public void executeBulkRequest(Iterable actionRequests, + public void executeBulkRequest(Iterable actionRequests, BiConsumer itemFailureHandler, Consumer completionHandler) { threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() { @@ -80,7 +80,7 @@ public class PipelineExecutionService implements ClusterStateListener { @Override protected void doRun() throws Exception { - for (DocumentRequest actionRequest : actionRequests) { + for (DocWriteRequest actionRequest : actionRequests) { if ((actionRequest instanceof IndexRequest)) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java 
b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index 5d2145ddc3c..57aa0cbb9a4 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.action.bulk; import org.apache.lucene.util.Constants; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -113,7 +113,7 @@ public class BulkRequestTests extends ESTestCase { public void testBulkAddIterable() { BulkRequest bulkRequest = Requests.bulkRequest(); - List requests = new ArrayList<>(); + List requests = new ArrayList<>(); requests.add(new IndexRequest("test", "test", "id").source("field", "value")); requests.add(new UpdateRequest("test", "test", "id").doc("field", "value")); requests.add(new DeleteRequest("test", "test", "id")); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index 590a503a654..e2d3e87c210 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -47,7 +47,7 @@ import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.function.Function; -import static org.elasticsearch.action.DocumentRequest.OpType; +import static org.elasticsearch.action.DocWriteRequest.OpType; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; diff --git 
a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 72bdc8a58f9..c0e735ec33c 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -20,12 +20,8 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest; -import org.elasticsearch.action.DocumentRequest.OpType; -import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.unit.TimeValue; diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index da25ec4261f..eb8c07525c6 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.unit.TimeValue; @@ -49,13 +49,13 @@ public class IndexRequestTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest(""); indexRequest.opType(create); - assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.CREATE)); + 
assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.CREATE)); indexRequest.opType(createUpper); - assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.CREATE)); + assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.CREATE)); indexRequest.opType(index); - assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.INDEX)); + assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.INDEX)); indexRequest.opType(indexUpper); - assertThat(indexRequest.opType() , equalTo(DocumentRequest.OpType.INDEX)); + assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.INDEX)); } public void testReadBogusString() { diff --git a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java index 8dac5853cac..7bd4f7fb7a6 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; */ import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -116,7 +116,7 @@ public class BulkRequestModifierTests extends ESTestCase { }); List originalResponses = new ArrayList<>(); - for (DocumentRequest actionRequest : bulkRequest.requests()) { + for (DocWriteRequest actionRequest : bulkRequest.requests()) { IndexRequest indexRequest = (IndexRequest) actionRequest; IndexResponse indexResponse = new IndexResponse(new ShardId("index", "_na_", 0), indexRequest.type(), indexRequest.id(), 1, true); originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType(), indexResponse)); diff 
--git a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java index 2b9f9c55320..9dbef147c01 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -174,7 +174,7 @@ public class IngestActionFilterTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); for (int i = 0; i < numRequest; i++) { if (rarely()) { - DocumentRequest request; + DocWriteRequest request; if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); } else { @@ -196,7 +196,7 @@ public class IngestActionFilterTests extends ESTestCase { verifyZeroInteractions(actionListener); int assertedRequests = 0; - for (DocumentRequest actionRequest : bulkRequest.requests()) { + for (DocWriteRequest actionRequest : bulkRequest.requests()) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; assertThat(indexRequest.sourceAsMap().size(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java index d198529f8d4..e3556c8cc7c 100644 --- a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java @@ -37,7 +37,7 @@ import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; -import static 
org.elasticsearch.action.DocumentRequest.OpType; +import static org.elasticsearch.action.DocWriteRequest.OpType; import static org.elasticsearch.client.Requests.clearIndicesCacheRequest; import static org.elasticsearch.client.Requests.getRequest; import static org.elasticsearch.client.Requests.indexRequest; diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index 8b22e4f0bc8..b9426b83e66 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -317,7 +317,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); int numIndexRequests = 0; for (int i = 0; i < numRequest; i++) { - DocumentRequest request; + DocWriteRequest request; if (randomBoolean()) { if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); diff --git a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java index 5980f781e2e..2490134db4e 100644 --- a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -20,8 +20,7 @@ package org.elasticsearch.routing; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest; +import 
org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -261,7 +260,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.INDEX)); + assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -282,7 +281,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.UPDATE)); + assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -303,7 +302,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo(DocumentRequest.OpType.DELETE)); + assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.DELETE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); 
assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 417defee5fa..2603c3f0c84 100644 --- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -21,10 +21,9 @@ package org.elasticsearch.versioning; import org.apache.lucene.util.TestUtil; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.lucene.uid.Versions; @@ -690,7 +689,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { client() .prepareIndex("test", "type", "id") .setSource("foo", "bar") - .setOpType(DocumentRequest.OpType.INDEX) + .setOpType(DocWriteRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) .execute() @@ -759,7 +758,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { client() .prepareIndex("test", "type", "id") .setSource("foo", "bar") - .setOpType(DocumentRequest.OpType.INDEX) + .setOpType(DocWriteRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) .execute() diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java 
index 1f135500dfd..85ac04c1619 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -154,9 +154,9 @@ public abstract class AbstractAsyncBulkIndexByScrollAction> { + interface RequestWrapper> { void setIndex(String index); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 4ea7f039970..8e08fbad718 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.DocWriteResponse.Result; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -49,7 +49,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.ShardSearchFailure; import 
org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Client; @@ -261,27 +260,27 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { ShardId shardId = new ShardId(new Index("name", "uid"), 0); if (rarely()) { versionConflicts++; - responses[i] = new BulkItemResponse(i, randomFrom(DocumentRequest.OpType.values()), + responses[i] = new BulkItemResponse(i, randomFrom(DocWriteRequest.OpType.values()), new Failure(shardId.getIndexName(), "type", "id" + i, new VersionConflictEngineException(shardId, "type", "id", "test"))); continue; } boolean createdResponse; - DocumentRequest.OpType opType; + DocWriteRequest.OpType opType; switch (randomIntBetween(0, 2)) { case 0: createdResponse = true; - opType = DocumentRequest.OpType.CREATE; + opType = DocWriteRequest.OpType.CREATE; created++; break; case 1: createdResponse = false; - opType = randomFrom(DocumentRequest.OpType.INDEX, DocumentRequest.OpType.UPDATE); + opType = randomFrom(DocWriteRequest.OpType.INDEX, DocWriteRequest.OpType.UPDATE); updated++; break; case 2: createdResponse = false; - opType = DocumentRequest.OpType.DELETE; + opType = DocWriteRequest.OpType.DELETE; deleted++; break; default: @@ -363,7 +362,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { Failure failure = new Failure("index", "type", "id", new RuntimeException("test")); DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction(); BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] - {new BulkItemResponse(0, DocumentRequest.OpType.CREATE, failure)}, randomLong()); + {new BulkItemResponse(0, DocWriteRequest.OpType.CREATE, failure)}, randomLong()); action.onBulkResponse(timeValueNanos(System.nanoTime()), bulkResponse); BulkIndexByScrollResponse response = 
listener.get(); assertThat(response.getBulkFailures(), contains(failure)); @@ -769,7 +768,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } BulkItemResponse[] responses = new BulkItemResponse[bulk.requests().size()]; for (int i = 0; i < bulk.requests().size(); i++) { - DocumentRequest item = bulk.requests().get(i); + DocWriteRequest item = bulk.requests().get(i); DocWriteResponse response; ShardId shardId = new ShardId(new Index(item.index(), "uuid"), 0); if (item instanceof IndexRequest) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index c909ea42ecb..9bfa41da7f3 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -28,7 +28,7 @@ import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; -import static org.elasticsearch.action.DocumentRequest.OpType.CREATE; +import static org.elasticsearch.action.DocWriteRequest.OpType.CREATE; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java index 041c796b173..1ab0613103f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.get.GetResponse; -import static org.elasticsearch.action.DocumentRequest.OpType.CREATE; +import static 
org.elasticsearch.action.DocWriteRequest.OpType.CREATE; import static org.elasticsearch.index.VersionType.EXTERNAL; import static org.elasticsearch.index.VersionType.INTERNAL; From e1ad00f07a5ec9c459ea517a623021b1347ef232 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Wed, 12 Oct 2016 14:41:44 +0200 Subject: [PATCH 19/53] Made REST query param types consistent duration -> time integer,float -> number --- .../main/resources/rest-api-spec/api/delete_by_query.json | 6 +++--- .../src/main/resources/rest-api-spec/api/index.json | 2 +- .../src/main/resources/rest-api-spec/api/reindex.json | 2 +- .../resources/rest-api-spec/api/reindex_rethrottle.json | 2 +- .../src/main/resources/rest-api-spec/api/scroll.json | 2 +- .../src/main/resources/rest-api-spec/api/search.json | 2 +- .../main/resources/rest-api-spec/api/search_template.json | 2 +- .../src/main/resources/rest-api-spec/api/update.json | 2 +- .../main/resources/rest-api-spec/api/update_by_query.json | 6 +++--- 9 files changed, 13 insertions(+), 13 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index a734f7b1bac..d27d00143b9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -94,7 +94,7 @@ "description" : "A comma-separated list of specific routing values" }, "scroll": { - "type" : "duration", + "type" : "time", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" }, "search_type": { @@ -182,7 +182,7 @@ "description" : "Sets the number of shard copies that must be active before proceeding with the delete by query operation. Defaults to 1, meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" }, "scroll_size": { - "type": "integer", + "type": "number", "defaut_value": 100, "description": "Size on the scroll request powering the update_by_query" }, @@ -192,7 +192,7 @@ "description" : "Should the request should block until the delete-by-query is complete." }, "requests_per_second": { - "type": "float", + "type": "number", "default": 0, "description": "The throttle for this request in sub-requests per second. -1 means set no throttle." } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 677219addee..814a53c1141 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -54,7 +54,7 @@ "description" : "Explicit timestamp for the document" }, "ttl": { - "type" : "duration", + "type" : "time", "description" : "Expiration time for the document" }, "version" : { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json index 5fb4fe58db3..79f3b883767 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json @@ -26,7 +26,7 @@ "description" : "Should the request should block until the reindex is complete." }, "requests_per_second": { - "type": "float", + "type": "number", "default": 0, "description": "The throttle to set on this request in sub-requests per second. -1 means set no throttle as does \"unlimited\" which is the only non-float this accepts." 
} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json index 5be7ea27407..4bba41d37d5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json @@ -13,7 +13,7 @@ }, "params": { "requests_per_second": { - "type": "float", + "type": "number", "required": true, "description": "The throttle to set on this request in floating sub-requests per second. -1 means set no throttle." } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json index 885b746d095..699ddcc9e00 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json @@ -13,7 +13,7 @@ }, "params": { "scroll": { - "type" : "duration", + "type" : "time", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" }, "scroll_id": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 21fda8dc805..4b473ae4062 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -89,7 +89,7 @@ "description" : "A comma-separated list of specific routing values" }, "scroll": { - "type" : "duration", + "type" : "time", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" }, "search_type": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json index ff1d35bb417..b9339b55332 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json @@ -39,7 +39,7 @@ "description" : "A comma-separated list of specific routing values" }, "scroll": { - "type" : "duration", + "type" : "time", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" }, "search_type": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index d87e4c5e7f5..7e7fffcee07 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -73,7 +73,7 @@ "description": "Explicit timestamp for the document" }, "ttl": { - "type": "duration", + "type": "time", "description": "Expiration time for the document" }, "version": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index b7f608b8b4f..996d9ff10c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -102,7 +102,7 @@ "description" : "A comma-separated list of specific routing values" }, "scroll": { - "type" : "duration", + "type" : "time", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" }, "search_type": { @@ -194,7 +194,7 @@ "description" : "Sets the number of shard copies that must be active before proceeding with the update by query operation. Defaults to 1, meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" }, "scroll_size": { - "type": "integer", + "type": "number", "defaut_value": 100, "description": "Size on the scroll request powering the update_by_query" }, @@ -204,7 +204,7 @@ "description" : "Should the request should block until the reindex is complete." }, "requests_per_second": { - "type": "float", + "type": "number", "default": 0, "description": "The throttle to set on this request in sub-requests per second. -1 means set no throttle as does \"unlimited\" which is the only non-float this accepts." } From 4c62e14c50d337fb3da89b936ea35ee65d337e5f Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Wed, 12 Oct 2016 14:48:49 +0200 Subject: [PATCH 20/53] Made REST query param types consistent text -> string --- .../src/main/resources/rest-api-spec/api/delete_by_query.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/search.json | 2 +- .../src/main/resources/rest-api-spec/api/update_by_query.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index d27d00143b9..8c2b257fcbf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -149,7 +149,7 @@ "description" : "How many suggestions to return in response" }, "suggest_text": { - "type" : "text", + "type" : "string", "description" : "The source text for which the suggestions should be returned" }, "timeout": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 4b473ae4062..5aa7a409a06 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -140,7 +140,7 @@ "description" : "How many suggestions to return in response" }, "suggest_text": { - "type" : "text", + "type" : "string", "description" : "The source text for which the suggestions should be returned" }, "timeout": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 996d9ff10c2..3974eaae88d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -157,7 +157,7 @@ "description" : "How many suggestions to return in response" }, "suggest_text": { - "type" : "text", + "type" : "string", "description" : "The source text for which the suggestions should be returned" }, "timeout": { From 27c87ab96185f37cdc5d09b3c8a1e1e0d3ab3d76 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Wed, 12 Oct 2016 15:28:57 +0200 Subject: [PATCH 21/53] improve testAutoGenerateIdNoDuplicates logging on failure Add unique doc content and log the results of the search results on failure, so we can better see what went wrong --- .../elasticsearch/indexing/IndexActionIT.java | 25 +++++++++++++++---- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java index 14b1b7c1b5e..8682d8127ae 100644 --- a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java @@ -23,10 +23,10 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchResponse; import 
org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.InvalidIndexNameException; @@ -34,6 +34,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.ArrayList; import java.util.Collection; @@ -47,7 +49,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicIntegerArray; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -57,6 +58,8 @@ public class IndexActionIT extends ESIntegTestCase { * This test tries to simulate load while creating an index and indexing documents * while the index is being created. 
*/ + + @TestLogging("_root:DEBUG,org.elasticsearch.index.shard.IndexShard:TRACE,org.elasticsearch.action.search:TRACE") public void testAutoGenerateIdNoDuplicates() throws Exception { int numberOfIterations = scaledRandomIntBetween(10, 50); for (int i = 0; i < numberOfIterations; i++) { @@ -66,7 +69,7 @@ public class IndexActionIT extends ESIntegTestCase { logger.info("indexing [{}] docs", numOfDocs); List builders = new ArrayList<>(numOfDocs); for (int j = 0; j < numOfDocs; j++) { - builders.add(client().prepareIndex("test", "type").setSource("field", "value")); + builders.add(client().prepareIndex("test", "type").setSource("field", "value_" + j)); } indexRandom(true, builders); logger.info("verifying indexed content"); @@ -74,7 +77,13 @@ public class IndexActionIT extends ESIntegTestCase { for (int j = 0; j < numOfChecks; j++) { try { logger.debug("running search with all types"); - assertHitCount(client().prepareSearch("test").get(), numOfDocs); + SearchResponse response = client().prepareSearch("test").get(); + if (response.getHits().totalHits() != numOfDocs) { + final String message = "Count is " + response.getHits().totalHits() + " but " + numOfDocs + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}. search response: \n{}", message, response); + fail(message); + } } catch (Exception e) { logger.error("search for all docs types failed", e); if (firstError == null) { @@ -83,7 +92,13 @@ public class IndexActionIT extends ESIntegTestCase { } try { logger.debug("running search with a specific type"); - assertHitCount(client().prepareSearch("test").setTypes("type").get(), numOfDocs); + SearchResponse response = client().prepareSearch("test").setTypes("type").get(); + if (response.getHits().totalHits() != numOfDocs) { + final String message = "Count is " + response.getHits().totalHits() + " but " + numOfDocs + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}. 
search response: \n{}", message, response); + fail(message); + } } catch (Exception e) { logger.error("search for all docs of a specific type failed", e); if (firstError == null) { From 6c0e4fc13d89b8d935f953d416679c5d066f6016 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 10 Jun 2016 15:51:55 +0200 Subject: [PATCH 22/53] Add test for using fuzziness parameter in multi_match query There was an issue with using fuzziness parameter in multi_match query that has been reported in #18710 and was fixed in Lucene 6.2 that is now used on master. In order to verify that fix and close the original issue this PR adds the test from that issue as an integration test. --- .../search/query/MultiMatchQueryIT.java | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index 693fffa307a..c3fce0a76a1 100644 --- a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -635,6 +635,43 @@ public class MultiMatchQueryIT extends ESIntegTestCase { assertFirstHit(searchResponse, hasId("ultimate1")); } + /** + * Test for edge case where field level boosting is applied to field that doesn't exist on documents on + * one shard. There was an issue reported in https://github.com/elastic/elasticsearch/issues/18710 where a + * `multi_match` query using the fuzziness parameter with a boost on one of two fields returns the + * same document score if both documents are placed on different shard. This test recreates that scenario + * and checks that the returned scores are different. 
+ */ + public void testFuzzyFieldLevelBoosting() throws InterruptedException, ExecutionException { + String idx = "test18710"; + CreateIndexRequestBuilder builder = prepareCreate(idx).setSettings(Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, 3) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + ); + assertAcked(builder.addMapping("type", "title", "type=string", "body", "type=string")); + ensureGreen(); + List builders = new ArrayList<>(); + builders.add(client().prepareIndex(idx, "type", "1").setSource( + "title", "foo", + "body", "bar")); + builders.add(client().prepareIndex(idx, "type", "2").setSource( + "title", "bar", + "body", "foo")); + indexRandom(true, false, builders); + + SearchResponse searchResponse = client().prepareSearch(idx) + .setExplain(true) + .setQuery(multiMatchQuery("foo").field("title", 100).field("body") + .fuzziness(0) + ).get(); + SearchHit[] hits = searchResponse.getHits().getHits(); + assertNotEquals("both documents should be on different shards", hits[0].getShard().getShardId(), hits[1].getShard().getShardId()); + assertEquals("1", hits[0].getId()); + assertEquals("2", hits[1].getId()); + assertThat(hits[0].getScore(), greaterThan(hits[1].score())); + } + private static void assertEquivalent(String query, SearchResponse left, SearchResponse right) { assertNoFailures(left); assertNoFailures(right); From bbe6555b7abe4a36cea243d4d599f1355621752d Mon Sep 17 00:00:00 2001 From: Robin Clarke Date: Wed, 12 Oct 2016 17:09:34 +0200 Subject: [PATCH 23/53] Docs: your -> you're (#20883) --- .../reference/aggregations/bucket/children-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc index 915a3c6a389..fa89314a230 100644 --- a/docs/reference/aggregations/bucket/children-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/children-aggregation.asciidoc @@ 
-54,7 +54,7 @@ PUT child_example/answer/1?parent=1&refresh "display_name": "Sam", "id": 48 }, - "body": "

    Unfortunately your pretty much limited to FTP...", + "body": "

    Unfortunately you're pretty much limited to FTP...", "creation_date": "2009-05-04T13:45:37.030" } PUT child_example/answer/2?parent=1&refresh From 06cfffa0a9b18ed8b1418b14327779452c777d10 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 12 Oct 2016 17:49:24 +0200 Subject: [PATCH 24/53] Explain how unreleased versions should be added to the codebase without adding it to Version.java (#20892) Sometimes it's useful / needed to use unreleased Version constants but we should not add those to the Version.java class for several reasons ie. BWC tests and assertions along those lines. Yet, it's not really obvious how to do that so I added some comments and a simple test for this. --- core/src/main/java/org/elasticsearch/Version.java | 9 +++++++++ .../test/java/org/elasticsearch/VersionTests.java | 13 +++++++++++++ 2 files changed, 22 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 65cb867bec9..9002328591c 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -95,6 +95,15 @@ public class Version { public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final Version CURRENT = V_6_0_0_alpha1; + /* NOTE: don't add unreleased version to this list except of the version assigned to CURRENT. + * If you need a version that doesn't exist here for instance V_5_1_0 then go and create such a version + * as a constant where you need it: + *

    +     *   public static final Version V_5_1_0_UNRELEASED = new Version(5010099, Version.CURRENT.luceneVersion);
    +     * 
    + * Then go to VersionsTest.java and add a test for this constant VersionTests#testUnknownVersions(). + * This is particularly useful if you are building a feature that needs a BWC layer for this unreleased version etc.*/ + static { assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to [" + org.apache.lucene.util.Version.LATEST + "] is still set to [" + CURRENT.luceneVersion + "]"; diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 862cccab318..c3238011f67 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.hamcrest.Matchers; +import org.junit.Assert; import java.lang.reflect.Modifier; import java.util.HashMap; @@ -279,4 +280,16 @@ public class VersionTests extends ESTestCase { } } } + private static final Version V_20_0_0_UNRELEASED = new Version(20000099, Version.CURRENT.luceneVersion); + + // see comment in Version.java about this test + public void testUnknownVersions() { + assertUnknownVersion(V_20_0_0_UNRELEASED); + expectThrows(AssertionError.class, () -> assertUnknownVersion(Version.CURRENT)); + } + + public static void assertUnknownVersion(Version version) { + assertFalse("Version " + version + " has been releaed don't use a new instance of this version", + VersionUtils.allVersions().contains(version)); + } } From 3d3ed7a83a4c7bf4a740ae7e68f39182a120a75f Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 12 Oct 2016 19:00:41 +0200 Subject: [PATCH 25/53] Increase number of allowed failures in MockRepository for snapshot restore test The test testDataFileCorruptionDuringRestore expects failures to happen when accessing snapshot data. 
It would sometimes fail however as MockRepository (by default) only simulates 100 failures. --- .../elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index fb55f5bb767..200ec6ac4b1 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -715,6 +715,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas .put("location", repositoryLocation) .put("random", randomAsciiOfLength(10)) .put("use_lucene_corruption", true) + .put("max_failure_number", 10000000L) .put("random_data_file_io_exception_rate", 1.0))); // Test restore after index deletion From 968fbaceef298908a6ce6b31e42fb82586743fb7 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 12 Oct 2016 22:13:13 +0200 Subject: [PATCH 26/53] Never use ThreadPool#estimatedTimeInMillis as wall-clock time replacement --- core/src/main/java/org/elasticsearch/index/IndexService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 8101397a45c..2b32c3ef92a 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -476,7 +476,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust * used for rewriting since it does not know about the current {@link IndexReader}. 
*/ public QueryShardContext newQueryShardContext() { - return newQueryShardContext(0, null, threadPool::estimatedTimeInMillis); + return newQueryShardContext(0, null, System::currentTimeMillis); } /** From 12392b5425fe7c96da671affd797f65fc3fa2dc9 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 12 Oct 2016 22:33:47 +0200 Subject: [PATCH 27/53] Ensure port range is readable in the exception message (#20893) Both netty3 and netty4 http implementation printed the default toString representation of PortRange if ports couldn't be bound. This commit adds a better default toString method to PortRange and uses the string representation for the error message in the http implementations. --- .../common/transport/PortsRange.java | 7 ++ .../netty3/Netty3HttpServerTransport.java | 83 ++++++++-------- .../Netty3HttpServerTransportTests.java | 15 +++ .../netty3/SimpleNetty3TransportTests.java | 24 +++++ .../netty4/Netty4HttpServerTransport.java | 99 ++++++++++--------- .../Netty4HttpServerTransportTests.java | 16 ++- .../netty4/SimpleNetty4TransportTests.java | 24 +++++ 7 files changed, 182 insertions(+), 86 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java b/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java index aef5db31e2d..f567264d26e 100644 --- a/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java +++ b/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java @@ -80,4 +80,11 @@ public class PortsRange { public interface PortCallback { boolean onPortNumber(int portNumber); } + + @Override + public String toString() { + return "PortsRange{" + + "portRange='" + portRange + '\'' + + '}'; + } } diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java index e961f497bcd..114d7c6b303 100644 --- 
a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java @@ -281,34 +281,42 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem @Override protected void doStart() { - this.serverOpenChannels = new Netty3OpenChannelsHandler(logger); - if (blockingServer) { - serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory( - Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)), - Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)) - )); - } else { - serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory( - Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)), - Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)), - workerCount)); - } - serverBootstrap.setPipelineFactory(configureServerChannelPipelineFactory()); + boolean success = false; + try { + this.serverOpenChannels = new Netty3OpenChannelsHandler(logger); + if (blockingServer) { + serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory( + Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)), + Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)) + )); + } else { + serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory( + Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)), + Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)), + workerCount)); + } + serverBootstrap.setPipelineFactory(configureServerChannelPipelineFactory()); - serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay); - 
serverBootstrap.setOption("child.keepAlive", tcpKeepAlive); - if (tcpSendBufferSize.getBytes() > 0) { + serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay); + serverBootstrap.setOption("child.keepAlive", tcpKeepAlive); + if (tcpSendBufferSize.getBytes() > 0) { - serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.getBytes()); + serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.getBytes()); + } + if (tcpReceiveBufferSize.getBytes() > 0) { + serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.getBytes()); + } + serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory); + serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory); + serverBootstrap.setOption("reuseAddress", reuseAddress); + serverBootstrap.setOption("child.reuseAddress", reuseAddress); + this.boundAddress = createBoundHttpAddress(); + success = true; + } finally { + if (success == false) { + doStop(); // otherwise we leak threads since we never moved to started + } } - if (tcpReceiveBufferSize.getBytes() > 0) { - serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.getBytes()); - } - serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory); - serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory); - serverBootstrap.setOption("reuseAddress", reuseAddress); - serverBootstrap.setOption("child.reuseAddress", reuseAddress); - this.boundAddress = createBoundHttpAddress(); } private BoundTransportAddress createBoundHttpAddress() { @@ -402,24 +410,21 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem private TransportAddress bindAddress(final InetAddress hostAddress) { final AtomicReference lastException = new AtomicReference<>(); final AtomicReference boundSocket = new AtomicReference<>(); - boolean success = 
port.iterate(new PortsRange.PortCallback() { - @Override - public boolean onPortNumber(int portNumber) { - try { - synchronized (serverChannels) { - Channel channel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)); - serverChannels.add(channel); - boundSocket.set((InetSocketAddress) channel.getLocalAddress()); - } - } catch (Exception e) { - lastException.set(e); - return false; + boolean success = port.iterate(portNumber -> { + try { + synchronized (serverChannels) { + Channel channel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)); + serverChannels.add(channel); + boundSocket.set((InetSocketAddress) channel.getLocalAddress()); } - return true; + } catch (Exception e) { + lastException.set(e); + return false; } + return true; }); if (!success) { - throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get()); + throw new BindHttpException("Failed to bind to [" + port.getPortRangeString() + "]", lastException.get()); } if (logger.isDebugEnabled()) { diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java index d291f76ff38..6ab4dbd709f 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java @@ -22,8 +22,10 @@ package org.elasticsearch.http.netty3; import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.netty3.cors.Netty3CorsConfig; import 
org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -103,4 +105,17 @@ public class Netty3HttpServerTransportTests extends ESTestCase { assertThat(corsConfig.allowedRequestMethods().stream().map(HttpMethod::getName).collect(Collectors.toSet()), equalTo(methods)); transport.close(); } + + public void testBindUnavailableAddress() { + try (Netty3HttpServerTransport transport = new Netty3HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool)) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build(); + try (Netty3HttpServerTransport otherTransport = new Netty3HttpServerTransport(settings, networkService, bigArrays, + threadPool)) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); + assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); + } + } + } } diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java index e57d36cbc58..b7f20df75a5 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java @@ -28,9 +28,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.node.Node; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.AbstractSimpleTransportTestCase; +import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; @@ -78,4 +80,26 @@ public class SimpleNetty3TransportTests extends AbstractSimpleTransportTestCase assertThat(e.getMessage(), containsString("[127.0.0.1:9876]")); } } + + public void testBindUnavailableAddress() { + // this is on a lower level since it needs access to the TransportService before it's started + int port = serviceA.boundAddress().publishAddress().getPort(); + Settings settings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "foobar") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") + .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .put("transport.tcp.port", port) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { + MockTransportService transportService = nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings); + try { + transportService.start(); + } finally { + transportService.stop(); + transportService.close(); + } + }); + assertEquals("Failed to bind to ["+ port + "]", bindTransportException.getMessage()); + } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 98d4eeca17b..0f1fe2a9059 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -285,40 +285,50 @@ public class Netty4HttpServerTransport 
extends AbstractLifecycleComponent implem @Override protected void doStart() { - this.serverOpenChannels = new Netty4OpenChannelsHandler(logger); + boolean success = false; + try { + this.serverOpenChannels = new Netty4OpenChannelsHandler(logger); - serverBootstrap = new ServerBootstrap(); - if (blockingServer) { - serverBootstrap.group(new OioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))); - serverBootstrap.channel(OioServerSocketChannel.class); - } else { - serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))); - serverBootstrap.channel(NioServerSocketChannel.class); + serverBootstrap = new ServerBootstrap(); + if (blockingServer) { + serverBootstrap.group(new OioEventLoopGroup(workerCount, daemonThreadFactory(settings, + HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))); + serverBootstrap.channel(OioServerSocketChannel.class); + } else { + serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, + HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))); + serverBootstrap.channel(NioServerSocketChannel.class); + } + + serverBootstrap.childHandler(configureServerChannelHandler()); + + serverBootstrap.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings)); + serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)); + + final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings); + if (tcpSendBufferSize.getBytes() > 0) { + serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes())); + } + + final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings); + if (tcpReceiveBufferSize.getBytes() > 0) { + serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes())); + } + + serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, 
recvByteBufAllocator); + serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); + + final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); + serverBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress); + serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress); + + this.boundAddress = createBoundHttpAddress(); + success = true; + } finally { + if (success == false) { + doStop(); // otherwise we leak threads since we never moved to started + } } - - serverBootstrap.childHandler(configureServerChannelHandler()); - - serverBootstrap.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings)); - serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)); - - final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings); - if (tcpSendBufferSize.getBytes() > 0) { - serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes())); - } - - final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings); - if (tcpReceiveBufferSize.getBytes() > 0) { - serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes())); - } - - serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); - serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); - - final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); - serverBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress); - serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress); - - this.boundAddress = createBoundHttpAddress(); } private BoundTransportAddress createBoundHttpAddress() { @@ -417,24 +427,21 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem private TransportAddress bindAddress(final InetAddress hostAddress) { final AtomicReference lastException = new 
AtomicReference<>(); final AtomicReference boundSocket = new AtomicReference<>(); - boolean success = port.iterate(new PortsRange.PortCallback() { - @Override - public boolean onPortNumber(int portNumber) { - try { - synchronized (serverChannels) { - ChannelFuture future = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)).sync(); - serverChannels.add(future.channel()); - boundSocket.set((InetSocketAddress) future.channel().localAddress()); - } - } catch (Exception e) { - lastException.set(e); - return false; + boolean success = port.iterate(portNumber -> { + try { + synchronized (serverChannels) { + ChannelFuture future = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)).sync(); + serverChannels.add(future.channel()); + boundSocket.set((InetSocketAddress) future.channel().localAddress()); } - return true; + } catch (Exception e) { + lastException.set(e); + return false; } + return true; }); if (!success) { - throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get()); + throw new BindHttpException("Failed to bind to [" + port.getPortRangeString() + "]", lastException.get()); } if (logger.isDebugEnabled()) { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index ceed9d9a503..498daf63226 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.http.BindHttpException; import 
org.elasticsearch.http.netty4.cors.Netty4CorsConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.BytesRestResponse; @@ -123,7 +124,7 @@ public class Netty4HttpServerTransportTests extends ESTestCase { transport.httpServerAdapter((request, channel, context) -> channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done")))); transport.start(); - TransportAddress remoteAddress = (TransportAddress) randomFrom(transport.boundAddress().boundAddresses()); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); try (Netty4HttpClient client = new Netty4HttpClient()) { FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); @@ -140,4 +141,17 @@ public class Netty4HttpServerTransportTests extends ESTestCase { } } } + + public void testBindUnavailableAddress() { + try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool)) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build(); + try (Netty4HttpServerTransport otherTransport = new Netty4HttpServerTransport(settings, networkService, bigArrays, + threadPool)) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); + assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); + } + } + } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 93468d8f2ea..a7a674007ba 100644 --- 
a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -28,9 +28,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.node.Node; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; +import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; @@ -79,4 +81,26 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase } } + public void testBindUnavailableAddress() { + // this is on a lower level since it needs access to the TransportService before it's started + int port = serviceA.boundAddress().publishAddress().getPort(); + Settings settings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "foobar") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") + .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .put("transport.tcp.port", port) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { + MockTransportService transportService = nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings); + try { + transportService.start(); + } finally { + transportService.stop(); + transportService.close(); + } + }); + assertEquals("Failed to bind to ["+ port + "]", 
bindTransportException.getMessage()); + } + } From 1bcd26627c2c9fe9bf706271808322f57ab91204 Mon Sep 17 00:00:00 2001 From: Thibaud BARDIN Date: Thu, 13 Oct 2016 08:53:30 +0200 Subject: [PATCH 28/53] [DOCS] Fix typo in "Wait For Active Shards" part (#20900) Add missing closing backtick --- docs/reference/docs/index_.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index 4fc4182505b..b2d4bb562ea 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -325,7 +325,7 @@ of configured copies per shard in the index (which is `number_of_replicas+1`). Specifying a negative value or a number greater than the number of shard copies will throw an error. -For example, suppose we have a cluster of three nodes, `A, `B`, and `C` and +For example, suppose we have a cluster of three nodes, `A`, `B`, and `C` and we create an index `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If we attempt an indexing operation, by default the operation will only ensure From 71aa807acdcc5095d494657774e178770fa687c7 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 13 Oct 2016 08:44:41 +0100 Subject: [PATCH 29/53] Fixes MultiMatchQuery so that it doesn't provide a null context (#20882) Before this change the `MultiMatchQuery` called the field types `termQuery()` with a null context. This is not correct so this change fixes this so the `MultiMatchQuery` now uses the `ShardQueryContext` it stores as a field. 
Relates to https://github.com/elastic/elasticsearch/pull/20796#pullrequestreview-3606305 --- .../index/search/MultiMatchQuery.java | 7 ++++--- .../index/search/MultiMatchQueryTests.java | 12 ++++++++---- .../search/query/MultiMatchQueryIT.java | 19 +++++++++++++++++++ 3 files changed, 31 insertions(+), 7 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 36c67e3989a..917b1d80ca3 100644 --- a/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -227,7 +227,7 @@ public class MultiMatchQuery extends MatchQuery { if (blendedFields == null) { return super.blendTerm(term, fieldType); } - return MultiMatchQuery.blendTerm(term.bytes(), commonTermsCutoff, tieBreaker, blendedFields); + return MultiMatchQuery.blendTerm(context, term.bytes(), commonTermsCutoff, tieBreaker, blendedFields); } @Override @@ -241,7 +241,8 @@ public class MultiMatchQuery extends MatchQuery { } } - static Query blendTerm(BytesRef value, Float commonTermsCutoff, float tieBreaker, FieldAndFieldType... blendedFields) { + static Query blendTerm(QueryShardContext context, BytesRef value, Float commonTermsCutoff, float tieBreaker, + FieldAndFieldType... 
blendedFields) { List queries = new ArrayList<>(); Term[] terms = new Term[blendedFields.length]; float[] blendedBoost = new float[blendedFields.length]; @@ -249,7 +250,7 @@ public class MultiMatchQuery extends MatchQuery { for (FieldAndFieldType ft : blendedFields) { Query query; try { - query = ft.fieldType.termQuery(value, null); + query = ft.fieldType.termQuery(value, context); } catch (IllegalArgumentException e) { // the query expects a certain class of values such as numbers // of ip addresses and the value can't be parsed, so ignore this diff --git a/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index 88d22de6e27..2454150be56 100644 --- a/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -101,7 +101,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] {2, 3}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, + new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -115,7 +116,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] {200, 30}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = 
MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, + new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -132,7 +134,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz") }; float[] boosts = new float[] {2}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, + new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -154,7 +157,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { .add(expectedClause1, Occur.SHOULD) .add(expectedClause2, Occur.SHOULD) .build(); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, + new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index c3fce0a76a1..1a10a700948 100644 --- a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -122,6 +122,13 @@ public class MultiMatchQueryIT extends ESIntegTestCase { "last_name", "", "category", "marvel hero", "skill", 1)); + + builders.add(client().prepareIndex("test", "test", "nowHero").setSource( + "full_name", "now sort of", + "first_name", "now", + "last_name", "", + "category", "marvel hero", + "skill", 1)); List 
firstNames = new ArrayList<>(); fill(firstNames, "Captain", between(15, 25)); fill(firstNames, "Ultimate", between(5, 10)); @@ -164,6 +171,9 @@ public class MultiMatchQueryIT extends ESIntegTestCase { .field("norms", false) .field("copy_to", "last_name_phrase") .endObject() + .startObject("date") + .field("type", "date") + .endObject() .endObject() .endObject().endObject(); } @@ -633,6 +643,15 @@ public class MultiMatchQueryIT extends ESIntegTestCase { .lenient(true))).get(); assertHitCount(searchResponse, 1L); assertFirstHit(searchResponse, hasId("ultimate1")); + + + // Check that cross fields works with date fields + searchResponse = client().prepareSearch("test") + .setQuery(randomizeType(multiMatchQuery("now", "f*", "date") + .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).lenient(true)) + .get(); + assertHitCount(searchResponse, 1L); + assertFirstHit(searchResponse, hasId("nowHero")); } /** From 389d3656974b3d6bbaf2f9dc3d1c25be2144ad9b Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 13 Oct 2016 09:58:00 +0200 Subject: [PATCH 30/53] Fix YAML formatting in several REST tests --- .../test/indices.put_settings/11_reset.yaml | 3 +- ...0_remove_index_and_replace_with_alias.yaml | 3 +- .../rest-api-spec/test/scroll/12_slices.yaml | 15 +++---- .../test/search.aggregation/20_terms.yaml | 40 +++++++++---------- .../test/search.inner_hits/10_basic.yaml | 24 ++++------- .../test/suggest/30_context.yaml | 15 ++++--- 6 files changed, 46 insertions(+), 54 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml index 6c93dabeec7..bc2dace0e18 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml @@ -22,8 +22,7 @@ Test reset index settings: - do: indices.get_settings: flat_settings: 
false - - is_false: - test-index.settings.index\.refresh_interval + - is_false: test-index.settings.index\.refresh_interval - do: indices.get_settings: include_defaults: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml index 14e258a6bb4..66da068895f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml @@ -31,4 +31,5 @@ - do: indices.get_mapping: index: test - - is_true: test_2 # the name of the index that the alias points to, would be `test` if the index were still there + # the name of the index that the alias points to, would be `test` if the index were still there + - is_true: test_2 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml index 5443059135a..1695bdb2352 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml @@ -21,10 +21,9 @@ scroll: 1m sort: foo body: - slice: { - id: 0, + slice: + id: 0 max: 3 - } query: match_all: {} @@ -41,10 +40,9 @@ size: 1 scroll: 1m body: - slice: { - id: 0, + slice: + id: 0 max: 1025 - } query: match_all: {} @@ -60,10 +58,9 @@ size: 1 scroll: 1m body: - slice: { - id: 0, + slice: + id: 0 max: 1025 - } query: match_all: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml index c35e79e6cfe..029b44544fd 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml @@ -47,30 +47,30 @@ setup: type: test id: 3 body: { "str": "bcd" } - + - do: indices.refresh: {} - + - do: search: body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str" } } } } - match: { hits.total: 3 } - + - length: { aggregations.str_terms.buckets: 2 } - + - match: { aggregations.str_terms.buckets.0.key: "abc" } - + - is_false: aggregations.str_terms.buckets.0.key_as_string - + - match: { aggregations.str_terms.buckets.0.doc_count: 2 } - + - match: { aggregations.str_terms.buckets.1.key: "bcd" } - + - is_false: aggregations.str_terms.buckets.1.key_as_string - + - match: { aggregations.str_terms.buckets.1.doc_count: 1 } - + --- "IP test": - do: @@ -112,9 +112,9 @@ setup: - match: { aggregations.ip_terms.buckets.0.doc_count: 2 } - match: { aggregations.ip_terms.buckets.1.key: "127.0.0.1" } - + - is_false: aggregations.ip_terms.buckets.1.key_as_string - + - match: { aggregations.ip_terms.buckets.1.doc_count: 1 } - do: @@ -142,7 +142,7 @@ setup: search: body: { "size" : 0, "aggs" : { "ip_terms" : { "terms" : { "field" : "ip", "exclude" : "127.*" } } } } - + --- "Boolean test": @@ -327,7 +327,7 @@ setup: - match: { aggregations.date_terms.buckets.1.key_as_string: "2014-09-01T00:00:00.000Z" } - match: { aggregations.date_terms.buckets.1.doc_count: 1 } - + - do: search: body: { "size" : 0, "aggs" : { "date_terms" : { "terms" : { "field" : "date", "include" : [ "2016-05-03" ] } } } } @@ -335,11 +335,11 @@ setup: - match: { hits.total: 3 } - length: { aggregations.date_terms.buckets: 1 } - + - match: { aggregations.date_terms.buckets.0.key_as_string: "2016-05-03T00:00:00.000Z" } - - - match: { aggregations.date_terms.buckets.0.doc_count: 2 } - + + - match: { aggregations.date_terms.buckets.0.doc_count: 2 } + - do: search: body: { "size" : 0, "aggs" : { "date_terms" : { 
"terms" : { "field" : "date", "exclude" : [ "2016-05-03" ] } } } } @@ -347,7 +347,7 @@ setup: - match: { hits.total: 3 } - length: { aggregations.date_terms.buckets: 1 } - + - match: { aggregations.date_terms.buckets.0.key_as_string: "2014-09-01T00:00:00.000Z" } - - match: { aggregations.date_terms.buckets.0.doc_count: 1 } + - match: { aggregations.date_terms.buckets.0.doc_count: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml index 98e61dd9fa9..100b44dcb04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml @@ -5,19 +5,14 @@ setup: index: test body: mappings: - type_1: { - properties: { - nested_field : { + type_1: + properties: + nested_field: type: nested - } - } - } type_2: {} - type_3: { - _parent: { + type_3: + _parent: type: type_2 - } - } --- "Nested inner hits": @@ -26,13 +21,8 @@ setup: index: test type: type_1 id: 1 - body: { - "nested_field" : [ - { - "foo": "bar" - } - ] - } + body: + "nested_field" : [ { "foo": "bar" } ] - do: indices.refresh: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml index da7af85cf9f..d47b52ce02c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml @@ -15,28 +15,33 @@ setup: "suggest_context": "type" : "completion" "contexts": - - "name" : "color" + - + "name" : "color" "type" : "category" "suggest_context_with_path": "type" : "completion" "contexts": - - "name" : "color" + - + "name" : "color" "type" : "category" "path" : "color" "suggest_geo": "type" : "completion" "contexts": - - "name" : "location" + - + "name" : 
"location" "type" : "geo" "precision" : "5km" "suggest_multi_contexts": "type" : "completion" "contexts": - - "name" : "location" + - + "name" : "location" "type" : "geo" "precision" : "5km" "path" : "location" - - "name" : "color" + - + "name" : "color" "type" : "category" "path" : "color" From ce1a9a2b0625ff13d0ccc7097023a85c0e4bd5db Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 13 Oct 2016 10:18:31 +0200 Subject: [PATCH 31/53] [TEST] Add test that filtered alias with date math isn't cached by the request cache --- .../indices/IndicesRequestCacheIT.java | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 078bf499ff4..07dd5d016c1 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -19,9 +19,11 @@ package org.elasticsearch.indices; +import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -30,6 +32,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.chrono.ISOChronology; +import org.joda.time.format.DateTimeFormat; import java.util.List; @@ -441,4 +444,56 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { equalTo(5L)); } + public void testCacheWithFilteredAlias() { + assertAcked(client().admin().indices().prepareCreate("index").addMapping("type", 
"created_at", "type=date") + .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true, IndexMetaData.SETTING_NUMBER_OF_SHARDS, + 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .addAlias(new Alias("last_week").filter(QueryBuilders.rangeQuery("created_at").gte("now-7d/d"))) + .get()); + DateTime now = new DateTime(DateTimeZone.UTC); + client().prepareIndex("index", "type", "1").setRouting("1").setSource("created_at", + DateTimeFormat.forPattern("YYYY-MM-dd").print(now)).get(); + refresh(); + + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(0L)); + + SearchResponse r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + // filtered alias is handled differently and 
must not be cached at this point + r1 = client().prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + r1 = client().prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + } + } From 75c9e4f4182fa714b69235bf5b8709fac0a627ed Mon Sep 17 00:00:00 2001 From: Jun Ohtani Date: Fri, 7 Oct 2016 20:53:43 +0900 Subject: [PATCH 32/53] IndexSettings should not be Null in Mapper.BuildContext Remove Nullable notation Add unit test Closes #20174 --- .../elasticsearch/index/mapper/Mapper.java | 6 +-- .../index/mapper/MapperTests.java | 44 +++++++++++++++++++ 2 files changed, 45 insertions(+), 5 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 06928566424..d45283a9416 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -39,6 +39,7 @@ public abstract class Mapper implements ToXContent, Iterable { private final ContentPath contentPath; public BuilderContext(Settings 
indexSettings, ContentPath contentPath) { + assert indexSettings != null; this.contentPath = contentPath; this.indexSettings = indexSettings; } @@ -47,16 +48,11 @@ public abstract class Mapper implements ToXContent, Iterable { return this.contentPath; } - @Nullable public Settings indexSettings() { return this.indexSettings; } - @Nullable public Version indexCreatedVersion() { - if (indexSettings == null) { - return null; - } return Version.indexCreated(indexSettings); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java new file mode 100644 index 00000000000..b5979db094c --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class MapperTests extends ESTestCase { + + public void testBuilderContextWithIndexSettings() { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new ContentPath(1)); + + assertNotNull(context.indexSettings()); + assertThat(context.indexSettings(), equalTo(settings)); + } + + public void testBuilderContextWithIndexSettingsAsNull() { + AssertionError e = expectThrows(AssertionError.class, () -> new Mapper.BuilderContext(null, new ContentPath(1))); + } + + +} From 43d11600ab0ccc595313a3a4b3696a4c20b8c602 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 13 Oct 2016 11:46:54 +0200 Subject: [PATCH 33/53] [TEST] Add test that shows how to use a classic pull-parser with the object parser --- .../common/xcontent/ObjectParserTests.java | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index 2cc4889be9d..733d3d1775d 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -113,6 +115,82 @@ public class ObjectParserTests extends ESTestCase { } + /** + * This test ensures we can 
use a classic pull-parsing parser + * together with the object parser + */ + public void testUseClassicPullParsingSubParser() throws IOException { + class ClassicParser { + URI parseURI(XContentParser parser) throws IOException { + String fieldName = null; + String host = ""; + int port = 0; + XContentParser.Token token; + while (( token = parser.currentToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING){ + if (fieldName.equals("host")) { + host = parser.text(); + } else { + throw new IllegalStateException("boom"); + } + } else if (token == XContentParser.Token.VALUE_NUMBER){ + if (fieldName.equals("port")) { + port = parser.intValue(); + } else { + throw new IllegalStateException("boom"); + } + } + parser.nextToken(); + } + return URI.create(host + ":" + port); + } + } + class Foo { + public String name; + public URI uri; + public void setName(String name) { + this.name = name; + } + + public void setURI(URI uri) { + this.uri = uri; + } + } + + class CustomParseFieldMatchSupplier implements ParseFieldMatcherSupplier { + + public final ClassicParser parser; + + CustomParseFieldMatchSupplier(ClassicParser parser) { + this.parser = parser; + } + + @Override + public ParseFieldMatcher getParseFieldMatcher() { + return ParseFieldMatcher.EMPTY; + } + + public URI parseURI(XContentParser parser) { + try { + return this.parser.parseURI(parser); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + XContentParser parser = XContentType.JSON.xContent() + .createParser("{\"url\" : { \"host\": \"http://foobar\", \"port\" : 80}, \"name\" : \"foobarbaz\"}"); + ObjectParser objectParser = new ObjectParser<>("foo"); + objectParser.declareString(Foo::setName, new ParseField("name")); + objectParser.declareObjectOrDefault(Foo::setURI, (p, s) -> s.parseURI(p), () -> null, new ParseField("url")); + Foo s = 
objectParser.parse(parser, new Foo(), new CustomParseFieldMatchSupplier(new ClassicParser())); + assertEquals(s.uri.getHost(), "foobar"); + assertEquals(s.uri.getPort(), 80); + assertEquals(s.name, "foobarbaz"); + } + public void testExceptions() throws IOException { XContentParser parser = XContentType.JSON.xContent().createParser("{\"test\" : \"foo\"}"); class TestStruct { From 101ea0549a824e49c1ed6040408e9a96db6fdd0e Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 13 Oct 2016 12:12:53 +0200 Subject: [PATCH 34/53] Add note about retention periods to reindex-upgrade docs --- docs/reference/setup/reindex_upgrade.asciidoc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/reference/setup/reindex_upgrade.asciidoc b/docs/reference/setup/reindex_upgrade.asciidoc index 8ee9c61e424..f9e7a60ee5b 100644 --- a/docs/reference/setup/reindex_upgrade.asciidoc +++ b/docs/reference/setup/reindex_upgrade.asciidoc @@ -19,6 +19,20 @@ If you are running an Elasticsearch 2.x cluster or older, you have two options: * Create a new 6.x cluster and use reindex-from-remote to import indices directly from the 2.x cluster. See <>. +.Time-based indices and retention periods +******************************************* + +For many use cases with time-based indices, you will not need to worry about +carrying old 2.x indices with you to 6.x. Data in time-based indices usually +becomes less interesting as time passes. Old indices can be deleted once they +fall outside of your retention period. + +Users in this position can continue to use 5.x until all old 2.x indices have +been deleted, then upgrade to 6.x directly. 
+ +******************************************* + + [[reindex-upgrade-inplace]] ==== Reindex in place From 85094d91904b2926aba117bf21ab3ebae2bbbbd5 Mon Sep 17 00:00:00 2001 From: Uli Fahrer Date: Thu, 13 Oct 2016 13:06:53 +0200 Subject: [PATCH 35/53] Fix wrong heading Relates #20906 --- .../aggregations/metrics/geobounds-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc b/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc index e49962a58f7..ef91d0b7000 100644 --- a/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc +++ b/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc @@ -1,5 +1,5 @@ [[java-aggs-metrics-geobounds]] -==== Cardinality Aggregation +==== Geo Bounds Aggregation Here is how you can use {ref}/search-aggregations-metrics-geobounds-aggregation.html[Geo Bounds Aggregation] From fdceb64072307c747b1c0b145775ef592cea42bd Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Thu, 13 Oct 2016 09:27:31 -0400 Subject: [PATCH 36/53] Use TimeValue instead of long for CacheBuilder methods This changes the CacheBuilder methods that are used to set expiration times to accept a TimeValue instead of long. Accepting a long can lead to issues where the incorrect value is passed in as the time unit is not clearly identified. By using TimeValue the caller no longer needs to worry about the time unit used by the cache or builder. 
--- .../org/elasticsearch/common/cache/Cache.java | 34 ++++++++----- .../common/cache/CacheBuilder.java | 42 +++++++++++----- .../indices/IndicesRequestCache.java | 3 +- .../elasticsearch/script/ScriptService.java | 2 +- .../common/cache/CacheBuilderTests.java | 50 +++++++++++++++++++ .../common/cache/CacheTests.java | 6 +-- 6 files changed, 107 insertions(+), 30 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/common/cache/CacheBuilderTests.java diff --git a/core/src/main/java/org/elasticsearch/common/cache/Cache.java b/core/src/main/java/org/elasticsearch/common/cache/Cache.java index a42d01ccf72..cf8b58d0271 100644 --- a/core/src/main/java/org/elasticsearch/common/cache/Cache.java +++ b/core/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -67,13 +67,13 @@ import java.util.function.ToLongBiFunction; */ public class Cache { // positive if entries have an expiration - private long expireAfterAccess = -1; + private long expireAfterAccessNanos = -1; // true if entries can expire after access private boolean entriesExpireAfterAccess; // positive if entries have an expiration after write - private long expireAfterWrite = -1; + private long expireAfterWriteNanos = -1; // true if entries can expire after initial insertion private boolean entriesExpireAfterWrite; @@ -98,22 +98,32 @@ public class Cache { Cache() { } - void setExpireAfterAccess(long expireAfterAccess) { - if (expireAfterAccess <= 0) { - throw new IllegalArgumentException("expireAfterAccess <= 0"); + void setExpireAfterAccessNanos(long expireAfterAccessNanos) { + if (expireAfterAccessNanos <= 0) { + throw new IllegalArgumentException("expireAfterAccessNanos <= 0"); } - this.expireAfterAccess = expireAfterAccess; + this.expireAfterAccessNanos = expireAfterAccessNanos; this.entriesExpireAfterAccess = true; } - void setExpireAfterWrite(long expireAfterWrite) { - if (expireAfterWrite <= 0) { - throw new IllegalArgumentException("expireAfterWrite <= 0"); + // pkg-private for testing 
+ long getExpireAfterAccessNanos() { + return this.expireAfterAccessNanos; + } + + void setExpireAfterWriteNanos(long expireAfterWriteNanos) { + if (expireAfterWriteNanos <= 0) { + throw new IllegalArgumentException("expireAfterWriteNanos <= 0"); } - this.expireAfterWrite = expireAfterWrite; + this.expireAfterWriteNanos = expireAfterWriteNanos; this.entriesExpireAfterWrite = true; } + // pkg-private for testing + long getExpireAfterWriteNanos() { + return this.expireAfterWriteNanos; + } + void setMaximumWeight(long maximumWeight) { if (maximumWeight < 0) { throw new IllegalArgumentException("maximumWeight < 0"); @@ -696,8 +706,8 @@ public class Cache { } private boolean isExpired(Entry entry, long now) { - return (entriesExpireAfterAccess && now - entry.accessTime > expireAfterAccess) || - (entriesExpireAfterWrite && now - entry.writeTime > expireAfterWrite); + return (entriesExpireAfterAccess && now - entry.accessTime > expireAfterAccessNanos) || + (entriesExpireAfterWrite && now - entry.writeTime > expireAfterWriteNanos); } private boolean unlink(Entry entry) { diff --git a/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java b/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java index ffb0e591180..67c8d508ba5 100644 --- a/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java @@ -19,13 +19,15 @@ package org.elasticsearch.common.cache; +import org.elasticsearch.common.unit.TimeValue; + import java.util.Objects; import java.util.function.ToLongBiFunction; public class CacheBuilder { private long maximumWeight = -1; - private long expireAfterAccess = -1; - private long expireAfterWrite = -1; + private long expireAfterAccessNanos = -1; + private long expireAfterWriteNanos = -1; private ToLongBiFunction weigher; private RemovalListener removalListener; @@ -44,19 +46,35 @@ public class CacheBuilder { return this; } - public CacheBuilder 
setExpireAfterAccess(long expireAfterAccess) { - if (expireAfterAccess <= 0) { + /** + * Sets the amount of time before an entry in the cache expires after it was last accessed. + * + * @param expireAfterAccess The amount of time before an entry expires after it was last accessed. Must not be {@code null} and must + * be greater than 0. + */ + public CacheBuilder setExpireAfterAccess(TimeValue expireAfterAccess) { + Objects.requireNonNull(expireAfterAccess); + final long expireAfterAccessNanos = expireAfterAccess.getNanos(); + if (expireAfterAccessNanos <= 0) { throw new IllegalArgumentException("expireAfterAccess <= 0"); } - this.expireAfterAccess = expireAfterAccess; + this.expireAfterAccessNanos = expireAfterAccessNanos; return this; } - public CacheBuilder setExpireAfterWrite(long expireAfterWrite) { - if (expireAfterWrite <= 0) { + /** + * Sets the amount of time before an entry in the cache expires after it was written. + * + * @param expireAfterWrite The amount of time before an entry expires after it was written. Must not be {@code null} and must be + * greater than 0. 
+ */ + public CacheBuilder setExpireAfterWrite(TimeValue expireAfterWrite) { + Objects.requireNonNull(expireAfterWrite); + final long expireAfterWriteNanos = expireAfterWrite.getNanos(); + if (expireAfterWriteNanos <= 0) { throw new IllegalArgumentException("expireAfterWrite <= 0"); } - this.expireAfterWrite = expireAfterWrite; + this.expireAfterWriteNanos = expireAfterWriteNanos; return this; } @@ -77,11 +95,11 @@ public class CacheBuilder { if (maximumWeight != -1) { cache.setMaximumWeight(maximumWeight); } - if (expireAfterAccess != -1) { - cache.setExpireAfterAccess(expireAfterAccess); + if (expireAfterAccessNanos != -1) { + cache.setExpireAfterAccessNanos(expireAfterAccessNanos); } - if (expireAfterWrite != -1) { - cache.setExpireAfterWrite(expireAfterWrite); + if (expireAfterWriteNanos != -1) { + cache.setExpireAfterWriteNanos(expireAfterWriteNanos); } if (weigher != null) { cache.setWeigher(weigher); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index ff3713a374f..a08f9ca1ad4 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -47,7 +47,6 @@ import java.util.Collections; import java.util.Iterator; import java.util.Set; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; /** * The indices request cache allows to cache a shard level request stage responses, helping with improving @@ -90,7 +89,7 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo CacheBuilder cacheBuilder = CacheBuilder.builder() .setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this); if (expire != null) { - cacheBuilder.setExpireAfterAccess(TimeUnit.MILLISECONDS.toNanos(expire.millis())); + cacheBuilder.setExpireAfterAccess(expire); } cache = cacheBuilder.build(); } 
diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 24cb816fb15..2d6e07d12ee 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -136,7 +136,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust TimeValue cacheExpire = SCRIPT_CACHE_EXPIRE_SETTING.get(settings); if (cacheExpire.getNanos() != 0) { - cacheBuilder.setExpireAfterAccess(cacheExpire.nanos()); + cacheBuilder.setExpireAfterAccess(cacheExpire); } logger.debug("using script cache with max_size [{}], expire [{}]", cacheMaxSize, cacheExpire); diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheBuilderTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheBuilderTests.java new file mode 100644 index 00000000000..e0a5786e184 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/cache/CacheBuilderTests.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.cache; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; + +public class CacheBuilderTests extends ESTestCase { + + public void testSettingExpireAfterAccess() { + IllegalArgumentException iae = + expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterAccess(TimeValue.MINUS_ONE)); + assertThat(iae.getMessage(), containsString("expireAfterAccess <=")); + iae = expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterAccess(TimeValue.ZERO)); + assertThat(iae.getMessage(), containsString("expireAfterAccess <=")); + final TimeValue timeValue = TimeValue.parseTimeValue(randomPositiveTimeValue(), ""); + Cache cache = CacheBuilder.builder().setExpireAfterAccess(timeValue).build(); + assertEquals(timeValue.getNanos(), cache.getExpireAfterAccessNanos()); + } + + public void testSettingExpireAfterWrite() { + IllegalArgumentException iae = + expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterWrite(TimeValue.MINUS_ONE)); + assertThat(iae.getMessage(), containsString("expireAfterWrite <=")); + iae = expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterWrite(TimeValue.ZERO)); + assertThat(iae.getMessage(), containsString("expireAfterWrite <=")); + final TimeValue timeValue = TimeValue.parseTimeValue(randomPositiveTimeValue(), ""); + Cache cache = CacheBuilder.builder().setExpireAfterWrite(timeValue).build(); + assertEquals(timeValue.getNanos(), cache.getExpireAfterWriteNanos()); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java index 3b88a3bdcfe..d8dbaa673a0 100644 --- a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java +++ b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java 
@@ -228,7 +228,7 @@ public class CacheTests extends ESTestCase { return now.get(); } }; - cache.setExpireAfterAccess(1); + cache.setExpireAfterAccessNanos(1); List evictedKeys = new ArrayList<>(); cache.setRemovalListener(notification -> { assertEquals(RemovalNotification.RemovalReason.EVICTED, notification.getRemovalReason()); @@ -265,7 +265,7 @@ public class CacheTests extends ESTestCase { return now.get(); } }; - cache.setExpireAfterWrite(1); + cache.setExpireAfterWriteNanos(1); List evictedKeys = new ArrayList<>(); cache.setRemovalListener(notification -> { assertEquals(RemovalNotification.RemovalReason.EVICTED, notification.getRemovalReason()); @@ -307,7 +307,7 @@ public class CacheTests extends ESTestCase { return now.get(); } }; - cache.setExpireAfterAccess(1); + cache.setExpireAfterAccessNanos(1); now.set(0); for (int i = 0; i < numberOfEntries; i++) { cache.put(i, Integer.toString(i)); From 61fd1cd582788a0196982b9ea1e4c31bda8c24b9 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 13 Oct 2016 16:07:31 +0200 Subject: [PATCH 37/53] Make AbstractSearchAsyncAction more testable and add a basic test case (#20890) `AbstractSearchAsyncAction` has only been tested in integration tests. The infrastructure is rather critical and should be tested on a unit-test level. This change takes the first step. 
--- .../action/search/AbstractAsyncAction.java | 6 +- .../search/AbstractSearchAsyncAction.java | 91 +++----- .../SearchDfsQueryAndFetchAsyncAction.java | 25 ++- .../SearchDfsQueryThenFetchAsyncAction.java | 26 ++- .../SearchQueryAndFetchAsyncAction.java | 23 +- .../SearchQueryThenFetchAsyncAction.java | 25 ++- .../action/search/TransportSearchAction.java | 80 +++++-- .../cluster/routing/OperationRouting.java | 5 - .../action/search/SearchAsyncActionTests.java | 211 ++++++++++++++++++ 9 files changed, 368 insertions(+), 124 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java index 3ce14d8dacd..96db19d5472 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java @@ -26,8 +26,10 @@ abstract class AbstractAsyncAction { private final long startTime; - protected AbstractAsyncAction() { - this.startTime = System.currentTimeMillis(); + protected AbstractAsyncAction() { this(System.currentTimeMillis());} + + protected AbstractAsyncAction(long startTime) { + this.startTime = startTime; } /** diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 6cb68b8e9be..b9f4120844c 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -27,15 +27,10 @@ import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.TransportActions; -import 
org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; @@ -45,12 +40,12 @@ import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; -import org.elasticsearch.threadpool.ThreadPool; import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import static org.elasticsearch.action.search.TransportSearchHelper.internalSearchRequest; @@ -58,73 +53,45 @@ abstract class AbstractSearchAsyncAction protected final Logger logger; protected final SearchTransportService searchTransportService; - private final IndexNameExpressionResolver indexNameExpressionResolver; - protected final SearchPhaseController searchPhaseController; - protected final ThreadPool threadPool; + private final Executor executor; protected final ActionListener listener; - protected final GroupShardsIterator shardsIts; + private final GroupShardsIterator shardsIts; protected final SearchRequest request; - protected final ClusterState clusterState; - protected final DiscoveryNodes nodes; + /** Used by subclasses to resolve node ids to DiscoveryNodes. 
**/ + protected final Function nodeIdToDiscoveryNode; protected final int expectedSuccessfulOps; private final int expectedTotalOps; protected final AtomicInteger successfulOps = new AtomicInteger(); private final AtomicInteger totalOps = new AtomicInteger(); protected final AtomicArray firstResults; + private final Map perIndexFilteringAliases; + private final long clusterStateVersion; private volatile AtomicArray shardFailures; private final Object shardFailuresMutex = new Object(); protected volatile ScoreDoc[] sortedShardDocs; - protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService, - IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request, - ActionListener listener) { + protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, + Function nodeIdToDiscoveryNode, + Map perIndexFilteringAliases, Executor executor, SearchRequest request, + ActionListener listener, GroupShardsIterator shardsIts, long startTime, + long clusterStateVersion) { + super(startTime); this.logger = logger; this.searchTransportService = searchTransportService; - this.indexNameExpressionResolver = indexNameExpressionResolver; - this.searchPhaseController = searchPhaseController; - this.threadPool = threadPool; + this.executor = executor; this.request = request; this.listener = listener; - - this.clusterState = clusterService.state(); - nodes = clusterState.nodes(); - - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); - - // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name - // date math expressions and $now in scripts. 
This way all apis will deal with now in the same way instead - // of just for the _search api - String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request.indicesOptions(), - startTime(), request.indices()); - - for (String index : concreteIndices) { - clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index); - } - - Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), - request.indices()); - - shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); - final int shardCount = shardsIts.size(); - failIfOverShardCountLimit(clusterService, shardCount); - expectedSuccessfulOps = shardCount; + this.perIndexFilteringAliases = perIndexFilteringAliases; + this.nodeIdToDiscoveryNode = nodeIdToDiscoveryNode; + this.clusterStateVersion = clusterStateVersion; + this.shardsIts = shardsIts; + expectedSuccessfulOps = shardsIts.size(); // we need to add 1 for non active partition, since we count it in the total! expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); - firstResults = new AtomicArray<>(shardsIts.size()); } - private void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) { - final long shardCountLimit = clusterService.getClusterSettings().get(TransportSearchAction.SHARD_COUNT_LIMIT_SETTING); - if (shardCount > shardCountLimit) { - throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of " - + shardCountLimit + ". This limit exists because querying many shards at the same time can make the " - + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to " - + "have a smaller number of larger shards. 
Update [" + TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey() - + "] to a greater value if you really want to query that many shards at the same time."); - } - } + public void start() { if (expectedSuccessfulOps == 0) { @@ -152,12 +119,11 @@ abstract class AbstractSearchAsyncAction // no more active shards... (we should not really get here, but just for safety) onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); } else { - final DiscoveryNode node = nodes.get(shard.currentNodeId()); + final DiscoveryNode node = nodeIdToDiscoveryNode.apply(shard.currentNodeId()); if (node == null) { onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); } else { - String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState, - shard.index().getName(), request.indices()); + String[] filteringAliases = perIndexFilteringAliases.get(shard.index().getName()); sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, startTime()), new ActionListener() { @Override @@ -319,7 +285,7 @@ abstract class AbstractSearchAsyncAction private void raiseEarlyFailure(Exception e) { for (AtomicArray.Entry entry : firstResults.asList()) { try { - DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.shardTarget().nodeId()); sendReleaseSearchContext(entry.value.id(), node); } catch (Exception inner) { inner.addSuppressed(e); @@ -344,7 +310,7 @@ abstract class AbstractSearchAsyncAction if (queryResult.hasHits() && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs try { - DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.queryResult().shardTarget().nodeId()); 
sendReleaseSearchContext(entry.value.queryResult().id(), node); } catch (Exception e) { logger.trace("failed to release context", e); @@ -402,7 +368,7 @@ abstract class AbstractSearchAsyncAction sb.append(result.shardTarget()); } - logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version()); + logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterStateVersion); } moveToSecondPhase(); } @@ -410,4 +376,9 @@ abstract class AbstractSearchAsyncAction protected abstract void moveToSecondPhase() throws Exception; protected abstract String firstPhaseName(); + + protected Executor getExecutor() { + return executor; + } + } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index ba73b0f4bea..171a97947b0 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -24,9 +24,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -34,21 +33,25 @@ import org.elasticsearch.search.fetch.QueryFetchSearchResult; import org.elasticsearch.search.internal.InternalSearchResponse; import 
org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction { private final AtomicArray queryFetchResults; - + private final SearchPhaseController searchPhaseController; SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, - request, listener); + Function nodeIdToDiscoveryNode, + Map perIndexFilteringAliases, SearchPhaseController searchPhaseController, + Executor executor, SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, perIndexFilteringAliases, executor, + request, listener, shardsIts, startTime, clusterStateVersion); + this.searchPhaseController = searchPhaseController; queryFetchResults = new AtomicArray<>(firstResults.length()); } @@ -70,7 +73,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction entry : firstResults.asList()) { DfsSearchResult dfsResult = entry.value; - DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId()); QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); executeSecondPhase(entry.index, dfsResult, counter, node, 
querySearchRequest); } @@ -115,7 +118,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index ccd646ae129..a0a5035335f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -26,9 +26,8 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.AggregatedDfs; @@ -39,23 +38,28 @@ import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { final AtomicArray queryResults; final AtomicArray fetchResults; final 
AtomicArray docIdsToLoad; + private final SearchPhaseController searchPhaseController; SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, - request, listener); + Function nodeIdToDiscoveryNode, + Map perIndexFilteringAliases, SearchPhaseController searchPhaseController, + Executor executor, SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, perIndexFilteringAliases, executor, + request, listener, shardsIts, startTime, clusterStateVersion); + this.searchPhaseController = searchPhaseController; queryResults = new AtomicArray<>(firstResults.length()); fetchResults = new AtomicArray<>(firstResults.length()); docIdsToLoad = new AtomicArray<>(firstResults.length()); @@ -78,7 +82,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : firstResults.asList()) { DfsSearchResult dfsResult = entry.value; - DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId()); QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); executeQuery(entry.index, dfsResult, counter, querySearchRequest, node); } @@ -149,7 +153,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : docIdsToLoad.asList()) { QuerySearchResult queryResult = queryResults.get(entry.index); - DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId()); + DiscoveryNode node = 
nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId()); ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard); executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } @@ -192,7 +196,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { final boolean isScrollRequest = request.scroll() != null; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index d799bc26764..31372838142 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -22,24 +22,29 @@ package org.elasticsearch.action.search; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.search.fetch.QueryFetchSearchResult; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; +import java.util.function.Function; class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction { + private final SearchPhaseController searchPhaseController; + SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - 
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, - request, listener); + Function nodeIdToDiscoveryNode, Map perIndexFilteringAliases, + SearchPhaseController searchPhaseController, Executor executor, + SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, perIndexFilteringAliases, executor, + request, listener, shardsIts, startTime, clusterStateVersion); + this.searchPhaseController = searchPhaseController; } @Override @@ -55,7 +60,7 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { final boolean isScrollRequest = request.scroll() != null; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 6df2bb3f87e..edf651e1f2a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -26,9 +26,8 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import 
org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -36,21 +35,27 @@ import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResultProvider; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { final AtomicArray fetchResults; final AtomicArray docIdsToLoad; + private final SearchPhaseController searchPhaseController; - SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener); + SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, + Function nodeIdToDiscoveryNode, Map perIndexFilteringAliases, + SearchPhaseController searchPhaseController, Executor executor, + SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, perIndexFilteringAliases, executor, request, listener, + shardsIts, startTime, clusterStateVersion); + this.searchPhaseController = searchPhaseController; fetchResults = new AtomicArray<>(firstResults.length()); docIdsToLoad = new AtomicArray<>(firstResults.length()); } @@ -82,7 
+87,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : docIdsToLoad.asList()) { QuerySearchResultProvider queryResult = firstResults.get(entry.index); - DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId()); ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard); executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } @@ -125,7 +130,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { final boolean isScrollRequest = request.scroll() != null; diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 9b9ca48fc33..54105dc82dc 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -23,7 +23,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; @@ -37,8 +41,12 @@ import org.elasticsearch.search.SearchService; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.Set; +import java.util.concurrent.Executor; +import java.util.function.Function; import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH; import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH; @@ -67,14 +75,33 @@ public class TransportSearchAction extends HandledTransportAction listener) { + // pure paranoia if time goes backwards we are at least positive + final long startTimeInMillis = Math.max(0, System.currentTimeMillis()); + ClusterState clusterState = clusterService.state(); + clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); + + // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name + // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead + // of just for the _search api + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest.indicesOptions(), + startTimeInMillis, searchRequest.indices()); + Map filteringAliasLookup = new HashMap<>(); + + for (String index : concreteIndices) { + clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index); + filteringAliasLookup.put(index, indexNameExpressionResolver.filteringAliases(clusterState, + index, searchRequest.indices())); + } + + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), + searchRequest.indices()); + GroupShardsIterator shardIterators = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, + searchRequest.preference()); + failIfOverShardCountLimit(clusterService, shardIterators.size()); + // optimize search type for cases where there is only one shard group to search on try { - ClusterState 
clusterState = clusterService.state(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest); - Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, - searchRequest.routing(), searchRequest.indices()); - int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap); - if (shardCount == 1) { + if (shardIterators.size() == 1) { // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard searchRequest.searchType(QUERY_AND_FETCH); } @@ -95,27 +122,37 @@ public class TransportSearchAction extends HandledTransportAction listener) { + private AbstractSearchAsyncAction searchAsyncAction(SearchRequest searchRequest, GroupShardsIterator shardIterators, long startTime, + ClusterState state, Map filteringAliasLookup, + ActionListener listener) { + final Function nodesLookup = state.nodes()::get; + final long clusterStateVersion = state.version(); + Executor executor = threadPool.executor(ThreadPool.Names.SEARCH); AbstractSearchAsyncAction searchAsyncAction; switch(searchRequest.searchType()) { case DFS_QUERY_THEN_FETCH: - searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup, + filteringAliasLookup, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion); break; case QUERY_THEN_FETCH: - searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup, + 
filteringAliasLookup, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion); break; case DFS_QUERY_AND_FETCH: - searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup, + filteringAliasLookup, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion); break; case QUERY_AND_FETCH: - searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup, + filteringAliasLookup, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion); break; default: throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]"); @@ -123,4 +160,15 @@ public class TransportSearchAction extends HandledTransportAction shardCountLimit) { + throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of " + + shardCountLimit + ". This limit exists because querying many shards at the same time can make the " + + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to " + + "have a smaller number of larger shards. 
Update [" + SHARD_COUNT_LIMIT_SETTING.getKey() + + "] to a greater value if you really want to query that many shards at the same time."); + } + } + } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 10a29963b63..94cb4b8c8e8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -68,11 +68,6 @@ public class OperationRouting extends AbstractComponent { return preferenceActiveShardIterator(indexShard, clusterState.nodes().getLocalNodeId(), clusterState.nodes(), preference); } - public int searchShardsCount(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing) { - final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); - return shards.size(); - } - public GroupShardsIterator searchShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing, @Nullable String preference) { final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); final Set set = new HashSet<>(shards.size()); diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java new file mode 100644 index 00000000000..0cd8015a374 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.search; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.PlainShardIterator; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.internal.ShardSearchTransportRequest; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +public class SearchAsyncActionTests extends ESTestCase { + + public void testFanOutAndCollect() throws InterruptedException { + SearchRequest request = new SearchRequest(); + 
CountDownLatch latch = new CountDownLatch(1); + AtomicReference response = new AtomicReference<>(); + ActionListener responseListener = new ActionListener() { + @Override + public void onResponse(SearchResponse searchResponse) { + response.set((TestSearchResponse) searchResponse); + } + + @Override + public void onFailure(Exception e) { + logger.warn("test failed", e); + fail(e.getMessage()); + } + }; + DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode replicaNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + + Map> nodeToContextMap = new HashMap<>(); + AtomicInteger contextIdGenerator = new AtomicInteger(0); + GroupShardsIterator shardsIter = getShardsIter("idx", randomIntBetween(1, 10), randomBoolean(), primaryNode, replicaNode); + AtomicInteger numFreedContext = new AtomicInteger(); + SearchTransportService transportService = new SearchTransportService(Settings.EMPTY, null) { + @Override + public void sendFreeContext(DiscoveryNode node, long contextId, SearchRequest request) { + numFreedContext.incrementAndGet(); + assertTrue(nodeToContextMap.containsKey(node)); + assertTrue(nodeToContextMap.get(node).remove(contextId)); + } + }; + Map lookup = new HashMap<>(); + lookup.put(primaryNode.getId(), primaryNode); + AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction(logger, transportService, lookup::get, + Collections.emptyMap(), null, request, responseListener, shardsIter, 0, 0) { + TestSearchResponse response = new TestSearchResponse(); + + @Override + protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener listener) { + assertTrue("shard: " + request.shardId() + " has been queried twice", response.queried.add(request.shardId())); + TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult(contextIdGenerator.incrementAndGet(), node); + Set ids = 
nodeToContextMap.computeIfAbsent(node, (n) -> new HashSet<>()); + ids.add(testSearchPhaseResult.id); + if (randomBoolean()) { + listener.onResponse(testSearchPhaseResult); + } else { + new Thread(() -> listener.onResponse(testSearchPhaseResult)).start(); + } + } + + @Override + protected void moveToSecondPhase() throws Exception { + for (int i = 0; i < firstResults.length(); i++) { + TestSearchPhaseResult result = firstResults.get(i); + assertEquals(result.node.getId(), result.shardTarget().getNodeId()); + sendReleaseSearchContext(result.id(), result.node); + } + responseListener.onResponse(response); + latch.countDown(); + } + + @Override + protected String firstPhaseName() { + return "test"; + } + + @Override + protected Executor getExecutor() { + fail("no executor in this class"); + return null; + } + }; + asyncAction.start(); + latch.await(); + assertNotNull(response.get()); + assertFalse(nodeToContextMap.isEmpty()); + assertTrue(nodeToContextMap.containsKey(primaryNode)); + assertEquals(shardsIter.size(), numFreedContext.get()); + assertTrue(nodeToContextMap.get(primaryNode).toString(), nodeToContextMap.get(primaryNode).isEmpty()); + + } + + private GroupShardsIterator getShardsIter(String index, int numShards, boolean doReplicas, DiscoveryNode primaryNode, + DiscoveryNode replicaNode) { + ArrayList list = new ArrayList<>(); + for (int i = 0; i < numShards; i++) { + ArrayList started = new ArrayList<>(); + ArrayList initializing = new ArrayList<>(); + ArrayList unassigned = new ArrayList<>(); + + ShardRouting routing = ShardRouting.newUnassigned(new ShardId(new Index(index, "_na_"), i), true, + RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar")); + routing = routing.initialize(primaryNode.getId(), i + "p", 0); + routing.started(); + started.add(routing); + if (doReplicas) { + routing = ShardRouting.newUnassigned(new ShardId(new Index(index, "_na_"), i), false, + 
RecoverySource.PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar")); + if (replicaNode != null) { + routing = routing.initialize(replicaNode.getId(), i + "r", 0); + if (randomBoolean()) { + routing.started(); + started.add(routing); + } else { + initializing.add(routing); + } + } else { + unassigned.add(routing); // unused yet + } + } + Collections.shuffle(started, random()); + started.addAll(initializing); + list.add(new PlainShardIterator(new ShardId(new Index(index, "_na_"), i), started)); + } + return new GroupShardsIterator(list); + } + + public static class TestSearchResponse extends SearchResponse { + public final Set queried = new HashSet<>(); + } + + public static class TestSearchPhaseResult implements SearchPhaseResult { + final long id; + final DiscoveryNode node; + SearchShardTarget shardTarget; + + public TestSearchPhaseResult(long id, DiscoveryNode node) { + this.id = id; + this.node = node; + } + + @Override + public long id() { + return id; + } + + @Override + public SearchShardTarget shardTarget() { + return this.shardTarget; + } + + @Override + public void shardTarget(SearchShardTarget shardTarget) { + this.shardTarget = shardTarget; + + } + + @Override + public void readFrom(StreamInput in) throws IOException { + + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + + } + } +} From e71c30c71df92c5d55a818daf0bbf103a8cc88cc Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 13 Oct 2016 16:17:28 +0200 Subject: [PATCH 38/53] Mustache: Add {{#url}}{{/url}} function to URL encode strings (#20838) This commit adds a new Mustache function (codename: url) and a new URLEncoder that can be used to URL encode strings in mustache templates. 
--- .../mustache/CustomMustacheFactory.java | 138 +++++++++++++++--- .../mustache/MustacheScriptEngineService.java | 15 +- .../mustache/CustomMustacheFactoryTests.java | 97 ++++++++++++ .../mustache/MustacheScriptEngineTests.java | 2 +- .../script/mustache/MustacheTests.java | 60 +++++--- .../lang_mustache/25_custom_functions.yaml | 43 ++++++ 6 files changed, 305 insertions(+), 50 deletions(-) create mode 100644 modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java create mode 100644 modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/25_custom_functions.yaml diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java index 8419730dc1c..95bae3732e5 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java @@ -27,41 +27,75 @@ import com.github.mustachejava.Mustache; import com.github.mustachejava.MustacheException; import com.github.mustachejava.MustacheVisitor; import com.github.mustachejava.TemplateContext; +import com.github.mustachejava.codes.DefaultMustache; import com.github.mustachejava.codes.IterableCode; import com.github.mustachejava.codes.WriteCode; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.io.StringWriter; import java.io.Writer; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.StringJoiner; -import java.util.function.BiConsumer; import java.util.function.Function; +import 
java.util.function.Supplier; import java.util.regex.Matcher; import java.util.regex.Pattern; public class CustomMustacheFactory extends DefaultMustacheFactory { - private final BiConsumer encoder; + static final String CONTENT_TYPE_PARAM = "content_type"; - public CustomMustacheFactory(boolean escaping) { + static final String JSON_MIME_TYPE = "application/json"; + static final String PLAIN_TEXT_MIME_TYPE = "text/plain"; + static final String X_WWW_FORM_URLENCODED_MIME_TYPE = "application/x-www-form-urlencoded"; + + private static final String DEFAULT_MIME_TYPE = JSON_MIME_TYPE; + + private static final Map> ENCODERS; + static { + Map> encoders = new HashMap<>(); + encoders.put(JSON_MIME_TYPE, JsonEscapeEncoder::new); + encoders.put(PLAIN_TEXT_MIME_TYPE, DefaultEncoder::new); + encoders.put(X_WWW_FORM_URLENCODED_MIME_TYPE, UrlEncoder::new); + ENCODERS = Collections.unmodifiableMap(encoders); + } + + private final Encoder encoder; + + public CustomMustacheFactory(String mimeType) { super(); setObjectHandler(new CustomReflectionObjectHandler()); - if (escaping) { - this.encoder = new JsonEscapeEncoder(); - } else { - this.encoder = new NoEscapeEncoder(); - } + this.encoder = createEncoder(mimeType); + } + + public CustomMustacheFactory() { + this(DEFAULT_MIME_TYPE); } @Override public void encode(String value, Writer writer) { - encoder.accept(value, writer); + try { + encoder.encode(value, writer); + } catch (IOException e) { + throw new MustacheException("Unable to encode value", e); + } + } + + static Encoder createEncoder(String mimeType) { + Supplier supplier = ENCODERS.get(mimeType); + if (supplier == null) { + throw new IllegalArgumentException("No encoder found for MIME type [" + mimeType + "]"); + } + return supplier.get(); } @Override @@ -83,6 +117,8 @@ public class CustomMustacheFactory extends DefaultMustacheFactory { list.add(new JoinerCode(templateContext, df, mustache)); } else if (CustomJoinerCode.match(variable)) { list.add(new 
CustomJoinerCode(templateContext, df, mustache, variable)); + } else if (UrlEncoderCode.match(variable)) { + list.add(new UrlEncoderCode(templateContext, df, mustache, variable)); } else { list.add(new IterableCode(templateContext, df, mustache, variable)); } @@ -253,27 +289,85 @@ public class CustomMustacheFactory extends DefaultMustacheFactory { } } - class NoEscapeEncoder implements BiConsumer { + /** + * This function encodes a string using the {@link URLEncoder#encode(String, String)} method + * with the UTF-8 charset. + */ + static class UrlEncoderCode extends DefaultMustache { + + private static final String CODE = "url"; + private final Encoder encoder; + + public UrlEncoderCode(TemplateContext tc, DefaultMustacheFactory df, Mustache mustache, String variable) { + super(tc, df, mustache.getCodes(), variable); + this.encoder = new UrlEncoder(); + } @Override - public void accept(String s, Writer writer) { - try { - writer.write(s); - } catch (IOException e) { - throw new MustacheException("Failed to encode value: " + s); + public Writer run(Writer writer, List scopes) { + if (getCodes() != null) { + for (Code code : getCodes()) { + try (StringWriter capture = new StringWriter()) { + code.execute(capture, scopes); + + String s = capture.toString(); + if (s != null) { + encoder.encode(s, writer); + } + } catch (IOException e) { + throw new MustacheException("Exception while parsing mustache function at line " + tc.line(), e); + } + } } + return writer; + } + + static boolean match(String variable) { + return CODE.equalsIgnoreCase(variable); } } - class JsonEscapeEncoder implements BiConsumer { + @FunctionalInterface + interface Encoder { + /** + * Encodes the {@code s} string and writes it to the {@code writer} {@link Writer}. 
+ * + * @param s The string to encode + * @param writer The {@link Writer} to which the encoded string will be written to + */ + void encode(final String s, final Writer writer) throws IOException; + } + + /** + * Encoder that simply writes the string to the writer without encoding. + */ + static class DefaultEncoder implements Encoder { @Override - public void accept(String s, Writer writer) { - try { - writer.write(JsonStringEncoder.getInstance().quoteAsString(s)); - } catch (IOException e) { - throw new MustacheException("Failed to escape and encode value: " + s); - } + public void encode(String s, Writer writer) throws IOException { + writer.write(s); + } + } + + /** + * Encoder that escapes JSON string values/fields. + */ + static class JsonEscapeEncoder implements Encoder { + + @Override + public void encode(String s, Writer writer) throws IOException { + writer.write(JsonStringEncoder.getInstance().quoteAsString(s)); + } + } + + /** + * Encoder that escapes strings using HTML form encoding + */ + static class UrlEncoder implements Encoder { + + @Override + public void encode(String s, Writer writer) throws IOException { + writer.write(URLEncoder.encode(s, StandardCharsets.UTF_8.name())); } } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index b7d7087373c..b2bc514327c 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -43,6 +43,8 @@ import java.security.PrivilegedAction; import java.util.Collections; import java.util.Map; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.CONTENT_TYPE_PARAM; + /** * Main entry point handling template registration, compilation and * execution. 
@@ -55,10 +57,6 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme public static final String NAME = "mustache"; - static final String CONTENT_TYPE_PARAM = "content_type"; - static final String JSON_CONTENT_TYPE = "application/json"; - static final String PLAIN_TEXT_CONTENT_TYPE = "text/plain"; - /** Thread local UTF8StreamWriter to store template execution results in, thread local to save object creation.*/ private static ThreadLocal> utf8StreamWriter = new ThreadLocal<>(); @@ -91,13 +89,16 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme * */ @Override public Object compile(String templateName, String templateSource, Map params) { - final MustacheFactory factory = new CustomMustacheFactory(isJsonEscapingEnabled(params)); + final MustacheFactory factory = createMustacheFactory(params); Reader reader = new FastStringReader(templateSource); return factory.compile(reader, "query-template"); } - private boolean isJsonEscapingEnabled(Map params) { - return JSON_CONTENT_TYPE.equals(params.getOrDefault(CONTENT_TYPE_PARAM, JSON_CONTENT_TYPE)); + private CustomMustacheFactory createMustacheFactory(Map params) { + if (params == null || params.isEmpty() || params.containsKey(CONTENT_TYPE_PARAM) == false) { + return new CustomMustacheFactory(); + } + return new CustomMustacheFactory(params.get(CONTENT_TYPE_PARAM)); } @Override diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java new file mode 100644 index 00000000000..fefa98e8f86 --- /dev/null +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script.mustache; + +import com.github.mustachejava.Mustache; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.CompiledScript; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptEngineService; +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.script.ScriptService.ScriptType.INLINE; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.CONTENT_TYPE_PARAM; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.JSON_MIME_TYPE; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.PLAIN_TEXT_MIME_TYPE; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.X_WWW_FORM_URLENCODED_MIME_TYPE; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class CustomMustacheFactoryTests extends ESTestCase { + + public void testCreateEncoder() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> 
CustomMustacheFactory.createEncoder(null)); + assertThat(e.getMessage(), equalTo("No encoder found for MIME type [null]")); + + e = expectThrows(IllegalArgumentException.class, () -> CustomMustacheFactory.createEncoder("")); + assertThat(e.getMessage(), equalTo("No encoder found for MIME type []")); + + e = expectThrows(IllegalArgumentException.class, () -> CustomMustacheFactory.createEncoder("test")); + assertThat(e.getMessage(), equalTo("No encoder found for MIME type [test]")); + + assertThat(CustomMustacheFactory.createEncoder(CustomMustacheFactory.JSON_MIME_TYPE), + instanceOf(CustomMustacheFactory.JsonEscapeEncoder.class)); + assertThat(CustomMustacheFactory.createEncoder(CustomMustacheFactory.PLAIN_TEXT_MIME_TYPE), + instanceOf(CustomMustacheFactory.DefaultEncoder.class)); + assertThat(CustomMustacheFactory.createEncoder(CustomMustacheFactory.X_WWW_FORM_URLENCODED_MIME_TYPE), + instanceOf(CustomMustacheFactory.UrlEncoder.class)); + } + + public void testJsonEscapeEncoder() { + final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY); + final Map params = randomBoolean() ? 
singletonMap(CONTENT_TYPE_PARAM, JSON_MIME_TYPE) : emptyMap(); + + Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params); + CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script); + + ExecutableScript executable = engine.executable(compiled, singletonMap("value", "a \"value\"")); + BytesReference result = (BytesReference) executable.run(); + assertThat(result.utf8ToString(), equalTo("{\"field\": \"a \\\"value\\\"\"}")); + } + + public void testDefaultEncoder() { + final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY); + final Map params = singletonMap(CONTENT_TYPE_PARAM, PLAIN_TEXT_MIME_TYPE); + + Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params); + CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script); + + ExecutableScript executable = engine.executable(compiled, singletonMap("value", "a \"value\"")); + BytesReference result = (BytesReference) executable.run(); + assertThat(result.utf8ToString(), equalTo("{\"field\": \"a \"value\"\"}")); + } + + public void testUrlEncoder() { + final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY); + final Map params = singletonMap(CONTENT_TYPE_PARAM, X_WWW_FORM_URLENCODED_MIME_TYPE); + + Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params); + CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script); + + ExecutableScript executable = engine.executable(compiled, singletonMap("value", "tilde~ AND date:[2016 FROM*]")); + BytesReference result = (BytesReference) executable.run(); + assertThat(result.utf8ToString(), equalTo("{\"field\": \"tilde%7E+AND+date%3A%5B2016+FROM*%5D\"}")); + } +} diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java 
b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java index 693ada174b9..b9f596e4d3d 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java @@ -49,7 +49,7 @@ public class MustacheScriptEngineTests extends ESTestCase { @Before public void setup() { qe = new MustacheScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - factory = new CustomMustacheFactory(true); + factory = new CustomMustacheFactory(); } public void testSimpleParameterReplace() { diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java index 9b48afe834a..becdda0e592 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java @@ -30,6 +30,8 @@ import org.elasticsearch.script.ScriptEngineService; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -43,8 +45,6 @@ import static java.util.Collections.singleton; import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.script.ScriptService.ScriptType.INLINE; -import static org.elasticsearch.script.mustache.MustacheScriptEngineService.CONTENT_TYPE_PARAM; -import static org.elasticsearch.script.mustache.MustacheScriptEngineService.PLAIN_TEXT_CONTENT_TYPE; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -144,24 
+144,6 @@ public class MustacheTests extends ESTestCase { assertThat(bytes.utf8ToString(), both(containsString("foo")).and(containsString("bar"))); } - public void testEscaping() { - // json string escaping enabled: - Mustache mustache = (Mustache) engine.compile(null, "{ \"field1\": \"{{value}}\"}", Collections.emptyMap()); - CompiledScript compiledScript = new CompiledScript(INLINE, "name", "mustache", mustache); - ExecutableScript executableScript = engine.executable(compiledScript, Collections.singletonMap("value", "a \"value\"")); - BytesReference rawResult = (BytesReference) executableScript.run(); - String result = rawResult.utf8ToString(); - assertThat(result, equalTo("{ \"field1\": \"a \\\"value\\\"\"}")); - - // json string escaping disabled: - mustache = (Mustache) engine.compile(null, "{ \"field1\": \"{{value}}\"}", - Collections.singletonMap(CONTENT_TYPE_PARAM, PLAIN_TEXT_CONTENT_TYPE)); - compiledScript = new CompiledScript(INLINE, "name", "mustache", mustache); - executableScript = engine.executable(compiledScript, Collections.singletonMap("value", "a \"value\"")); - rawResult = (BytesReference) executableScript.run(); - result = rawResult.utf8ToString(); - assertThat(result, equalTo("{ \"field1\": \"a \"value\"\"}")); - } public void testSizeAccessForCollectionsAndArrays() throws Exception { String[] randomArrayValues = generateRandomStringArray(10, 20, false); @@ -375,6 +357,44 @@ public class MustacheTests extends ESTestCase { assertScript("{{#join delimiter=' and '}}params{{/join delimiter=' and '}}", params, equalTo("1 and 2 and 3 and 4")); } + public void testUrlEncoder() { + Map urls = new HashMap<>(); + urls.put("https://www.elastic.co", + "https%3A%2F%2Fwww.elastic.co"); + urls.put("", + "%3Clogstash-%7Bnow%2Fd%7D%3E"); + urls.put("?query=(foo:A OR baz:B) AND title:/joh?n(ath[oa]n)/ AND date:{* TO 2012-01}", + "%3Fquery%3D%28foo%3AA+OR+baz%3AB%29+AND+title%3A%2Fjoh%3Fn%28ath%5Boa%5Dn%29%2F+AND+date%3A%7B*+TO+2012-01%7D"); + + for (Map.Entry 
url : urls.entrySet()) { + assertScript("{{#url}}{{params}}{{/url}}", singletonMap("params", url.getKey()), equalTo(url.getValue())); + } + } + + public void testUrlEncoderWithParam() throws Exception { + assertScript("{{#url}}{{index}}{{/url}}", singletonMap("index", ""), + equalTo("%3Clogstash-%7Bnow%2Fd%7BYYYY.MM.dd%7C%2B12%3A00%7D%7D%3E")); + + final String random = randomAsciiOfLength(10); + assertScript("{{#url}}prefix_{{s}}{{/url}}", singletonMap("s", random), + equalTo("prefix_" + URLEncoder.encode(random, StandardCharsets.UTF_8.name()))); + } + + public void testUrlEncoderWithJoin() { + Map params = singletonMap("emails", Arrays.asList("john@smith.com", "john.smith@email.com", "jsmith@email.com")); + assertScript("?query={{#url}}{{#join}}emails{{/join}}{{/url}}", params, + equalTo("?query=john%40smith.com%2Cjohn.smith%40email.com%2Cjsmith%40email.com")); + + params = singletonMap("indices", new String[]{"", "", ""}); + assertScript("{{#url}}https://localhost:9200/{{#join}}indices{{/join}}/_stats{{/url}}", params, + equalTo("https%3A%2F%2Flocalhost%3A9200%2F%3Clogstash-%7Bnow%2Fd-2d%7D" + + "%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogstash-%7Bnow%2Fd%7D%3E%2F_stats")); + + params = singletonMap("fibonacci", new int[]{1, 1, 2, 3, 5, 8, 13, 21, 34, 55}); + assertScript("{{#url}}{{#join delimiter='+'}}fibonacci{{/join delimiter='+'}}{{/url}}", params, + equalTo("1%2B1%2B2%2B3%2B5%2B8%2B13%2B21%2B34%2B55")); + } + private void assertScript(String script, Map vars, Matcher matcher) { Object result = engine.executable(new CompiledScript(INLINE, "inline", "mustache", compile(script)), vars).run(); assertThat(result, notNullValue()); diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/25_custom_functions.yaml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/25_custom_functions.yaml new file mode 100644 index 00000000000..a4e1dde4632 --- /dev/null +++ 
b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/25_custom_functions.yaml @@ -0,0 +1,43 @@ +--- +"Rendering using {{url}} function": + + - do: + render_search_template: + body: > + { + "inline": { + "query": { + "match": { + "url": "https://localhost:9200/{{#url}}{{index}}{{/url}}/{{#url}}{{type}}{{/url}}/_search" + } + } + }, + "params": { + "index": "", + "type" : "métriques" + } + } + + - match: { template_output.query.match.url: "https://localhost:9200/%3Clogstash-%7Bnow%2Fd-2d%7D%3E/m%C3%A9triques/_search" } + +--- +"Rendering using {{url}} and {{join}} functions": + + - do: + render_search_template: + body: > + { + "inline": { + "query": { + "match": { + "url": "{{#url}}https://localhost:9200/{{#join}}indices{{/join}}/_stats{{/url}}" + } + } + }, + "params": { + "indices": ["", "", ""] + } + } + + # Decoded URL is https://localhost:9200/,,/_stats + - match: { template_output.query.match.url: "https%3A%2F%2Flocalhost%3A9200%2F%3Clogstash-%7Bnow%2Fd-2d%7D%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogstash-%7Bnow%2Fd%7D%3E%2F_stats" } From 3bba7dbe07b007804a47cf2460ece5be9979f181 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 13 Oct 2016 10:49:32 -0400 Subject: [PATCH 39/53] Docs: note about snapshot version compatibility (#20896) It is important that folks understand that snapshot/restore isn't for archiving. It is appropriate for backup and disaster recovery but not for archival over long periods of time because of version incompatibility. 
Closes #20866 --- docs/reference/modules/snapshots.asciidoc | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 4d74500d68e..aa6846d1e8a 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -1,9 +1,20 @@ [[modules-snapshots]] == Snapshot And Restore -The snapshot and restore module allows to create snapshots of individual indices or an entire cluster into a remote -repository. At the time of the initial release only shared file system repository was supported, but now a range of -backends are available via officially supported repository plugins. +The snapshot and restore module allows to create snapshots of individual +indices or an entire cluster into a remote repository like shared file system, +S3, or HDFS. These snapshots are great for backups because they can be restored +relatively quickly but they are not archival because they can only be restored +to versions of Elasticsearch that can read the index. That means that: + +* A snapshot of an index created in 2.x can be restored to 5.x. +* A snapshot of an index created in 1.x can be restored to 2.x. +* A snapshot of an index created in 1.x can **not** be restored to 5.x. + +To restore a snapshot of an index created in 1.x to 5.x you can restore it to +a 2.x cluster and use <> to rebuild +the index in a 5.x cluster. This is as time consuming as restoring from +archival copies of the original data. [float] === Repositories @@ -516,5 +527,3 @@ well as the global metadata were readable. The restore operation requires the gl the index level blocks are ignored during restore because indices are essentially recreated during restore. 
Please note that a repository content is not part of the cluster and therefore cluster blocks don't affect internal repository operations such as listing or deleting snapshots from an already registered repository. - - From 7e5d8a6e5a888bd6ec754082b04d23935d5e3c27 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 13 Oct 2016 17:39:19 +0200 Subject: [PATCH 40/53] [TEST] use a different node ID for the test nodes --- .../org/elasticsearch/action/search/SearchAsyncActionTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 0cd8015a374..1aafa1d343b 100644 --- a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -68,7 +68,7 @@ public class SearchAsyncActionTests extends ESTestCase { } }; DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); - DiscoveryNode replicaNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); Map> nodeToContextMap = new HashMap<>(); AtomicInteger contextIdGenerator = new AtomicInteger(0); From e20d9d647838f8455fcedea56cb945a869370849 Mon Sep 17 00:00:00 2001 From: kunal642 Date: Thu, 13 Oct 2016 21:42:57 +0530 Subject: [PATCH 41/53] Removed unnecessary assertion on boolean values (#20910) * Removed unnecessary assertion on boolean values * Reversed changes for false assertion * corrected formatting * reverted changes for SettingsUpdater --- .../action/termvectors/TermVectorsFields.java | 12 ++++++------ .../org/elasticsearch/common/util/BigArrays.java | 2 +- .../org/elasticsearch/gateway/AsyncShardFetch.java | 14 +++++++------- 
.../gateway/DanglingIndicesState.java | 2 +- .../elasticsearch/gateway/GatewayAllocator.java | 6 +++--- .../elasticsearch/gateway/GatewayMetaState.java | 2 +- .../gateway/PrimaryShardAllocator.java | 2 +- .../gateway/ReplicaShardAllocator.java | 4 ++-- .../plain/GeoPointArrayIndexFieldData.java | 2 +- .../index/mapper/LegacyGeoPointFieldMapper.java | 2 +- .../index/query/GeoBoundingBoxQueryBuilder.java | 2 +- .../index/query/GeoDistanceQueryBuilder.java | 2 +- .../index/query/GeoDistanceRangeQueryBuilder.java | 4 ++-- .../index/shard/ShadowIndexShard.java | 2 +- .../search/sort/GeoDistanceSortBuilder.java | 2 +- .../index/mapper/GeoPointFieldMapperTests.java | 2 +- .../test/geo/RandomShapeGenerator.java | 2 +- 17 files changed, 32 insertions(+), 32 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java index 0ae8824ce8d..534ef4164e2 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java @@ -105,13 +105,13 @@ import static org.apache.lucene.util.ArrayUtil.grow; *
  • vint: frequency (always returned)
  • *
  • *
      - *
    • vint: position_1 (if positions == true)
    • - *
    • vint: startOffset_1 (if offset == true)
    • - *
    • vint: endOffset_1 (if offset == true)
    • - *
    • BytesRef: payload_1 (if payloads == true)
    • + *
    • vint: position_1 (if positions)
    • + *
    • vint: startOffset_1 (if offset)
    • + *
    • vint: endOffset_1 (if offset)
    • + *
    • BytesRef: payload_1 (if payloads)
    • *
    • ...
    • - *
    • vint: endOffset_freqency (if offset == true)
    • - *
    • BytesRef: payload_freqency (if payloads == true)
    • + *
    • vint: endOffset_freqency (if offset)
    • + *
    • BytesRef: payload_freqency (if payloads)
    • *
  • * */ diff --git a/core/src/main/java/org/elasticsearch/common/util/BigArrays.java b/core/src/main/java/org/elasticsearch/common/util/BigArrays.java index 6a15a3d9000..728db17c2a4 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -397,7 +397,7 @@ public class BigArrays implements Releasable { void adjustBreaker(long delta) { if (this.breakerService != null) { CircuitBreaker breaker = this.breakerService.getBreaker(CircuitBreaker.REQUEST); - if (this.checkBreaker == true) { + if (this.checkBreaker) { // checking breaker means potentially tripping, but it doesn't // have to if the delta is negative if (delta > 0) { diff --git a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index 42c40034b10..37277586bf7 100644 --- a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -124,7 +124,7 @@ public abstract class AsyncShardFetch implements Rel } // if we are still fetching, return null to indicate it - if (hasAnyNodeFetching(cache) == true) { + if (hasAnyNodeFetching(cache)) { return new FetchResult<>(shardId, null, emptySet(), emptySet()); } else { // nothing to fetch, yay, build the return value @@ -137,7 +137,7 @@ public abstract class AsyncShardFetch implements Rel DiscoveryNode node = nodes.get(nodeId); if (node != null) { - if (nodeEntry.isFailed() == true) { + if (nodeEntry.isFailed()) { // if its failed, remove it from the list of nodes, so if this run doesn't work // we try again next round to fetch it again it.remove(); @@ -361,7 +361,7 @@ public abstract class AsyncShardFetch implements Rel } void doneFetching(T value) { - assert fetching == true : "setting value but not in fetching mode"; + assert fetching : "setting value but not in fetching mode"; assert failure == null : "setting value 
when failure already set"; this.valueSet = true; this.value = value; @@ -369,7 +369,7 @@ public abstract class AsyncShardFetch implements Rel } void doneFetching(Throwable failure) { - assert fetching == true : "setting value but not in fetching mode"; + assert fetching : "setting value but not in fetching mode"; assert valueSet == false : "setting failure when already set value"; assert failure != null : "setting failure can't be null"; this.failure = failure; @@ -377,7 +377,7 @@ public abstract class AsyncShardFetch implements Rel } void restartFetching() { - assert fetching == true : "restarting fetching, but not in fetching mode"; + assert fetching : "restarting fetching, but not in fetching mode"; assert valueSet == false : "value can't be set when restarting fetching"; assert failure == null : "failure can't be set when restarting fetching"; this.fetching = false; @@ -388,7 +388,7 @@ public abstract class AsyncShardFetch implements Rel } boolean hasData() { - return valueSet == true || failure != null; + return valueSet || failure != null; } Throwable getFailure() { @@ -399,7 +399,7 @@ public abstract class AsyncShardFetch implements Rel @Nullable T getValue() { assert failure == null : "trying to fetch value, but its marked as failed, check isFailed"; - assert valueSet == true : "value is not set, hasn't been fetched yet"; + assert valueSet : "value is not set, hasn't been fetched yet"; return value; } } diff --git a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index 370778898fc..0c829e88182 100644 --- a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -153,7 +153,7 @@ public class DanglingIndicesState extends AbstractComponent { * for allocation. 
*/ private void allocateDanglingIndices() { - if (danglingIndices.isEmpty() == true) { + if (danglingIndices.isEmpty()) { return; } try { diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index d75a864d8dd..65a2876b3aa 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -85,7 +85,7 @@ public class GatewayAllocator extends AbstractComponent { boolean cleanCache = false; DiscoveryNode localNode = event.state().nodes().getLocalNode(); if (localNode != null) { - if (localNode.isMasterNode() == true && event.localNodeMaster() == false) { + if (localNode.isMasterNode() && event.localNodeMaster() == false) { cleanCache = true; } } else { @@ -174,7 +174,7 @@ public class GatewayAllocator extends AbstractComponent { AsyncShardFetch.FetchResult shardState = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId())); - if (shardState.hasData() == true) { + if (shardState.hasData()) { shardState.processAllocation(allocation); } return shardState; @@ -199,7 +199,7 @@ public class GatewayAllocator extends AbstractComponent { } AsyncShardFetch.FetchResult shardStores = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId())); - if (shardStores.hasData() == true) { + if (shardStores.hasData()) { shardStores.processAllocation(allocation); } return shardStores; diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index a05e85299a8..b609d0bacae 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -192,7 +192,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL Set relevantIndices; if (isDataOnlyNode(state)) { 
relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previousState, previouslyWrittenIndices); - } else if (state.nodes().getLocalNode().isMasterNode() == true) { + } else if (state.nodes().getLocalNode().isMasterNode()) { relevantIndices = getRelevantIndicesForMasterEligibleNode(state); } else { relevantIndices = Collections.emptySet(); diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 15acd625248..7d8e8327d39 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -195,7 +195,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { "the allocation deciders returned a YES decision to allocate to node [" + nodeId + "]", decidedNode.nodeShardState.allocationId(), buildNodeDecisions(nodesToAllocate, explain)); - } else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) { + } else if (nodesToAllocate.throttleNodeShards.isEmpty() && !nodesToAllocate.noNodeShards.isEmpty()) { // The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard // can be force-allocated to one of the nodes. 
final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate( diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index 620fd354327..390f3cb379e 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -65,7 +65,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { List shardCancellationActions = new ArrayList<>(); for (RoutingNode routingNode : routingNodes) { for (ShardRouting shard : routingNode) { - if (shard.primary() == true) { + if (shard.primary()) { continue; } if (shard.initializing() == false) { @@ -109,7 +109,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { } if (currentNode.equals(nodeWithHighestMatch) == false && Objects.equals(currentSyncId, primaryStore.syncId()) == false - && matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch) == true) { + && matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch)) { // we found a better match that has a full sync id match, the existing allocation is not fully synced // so we found a better one, cancel this one logger.debug("cancelling allocation of replica on [{}], sync id match found on node [{}]", diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java index d484c503c2b..18313f32745 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java @@ -68,7 +68,7 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData estimator.afterLoad(null, data.ramBytesUsed()); return data; } - return 
(indexSettings.getIndexVersionCreated().before(Version.V_2_2_0) == true) ? + return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0)) ? loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java index 99ca07b06bf..fc46a08ce1a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java @@ -297,7 +297,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement validPoint = true; } - if (coerce.value() == true && validPoint == false) { + if (coerce.value() && validPoint == false) { // by setting coerce to false we are assuming all geopoints are already in a valid coordinate system // thus this extra step can be skipped GeoUtils.normalizePoint(point, true, true); diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 26b979e45fc..1cfe2acb246 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -299,7 +299,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder geoDistance = GeoDistance.fromString(parser.text()); } else if (parseFieldMatcher.match(currentName, COERCE_FIELD)) { coerce = parser.booleanValue(); - if (coerce == true) { + if (coerce) { ignoreMalformed = true; } } else if (parseFieldMatcher.match(currentName, IGNORE_MALFORMED_FIELD)) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java 
index 7c4acb44039..8226d18239a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -84,7 +84,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { assertThat(doc.rootDoc().getField("point.lon"), notNullValue()); assertThat(doc.rootDoc().getField("point.lon").fieldType().stored(), is(stored)); assertThat(doc.rootDoc().getField("point.geohash"), nullValue()); - if (indexCreatedBefore22 == true) { + if (indexCreatedBefore22) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); diff --git a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java index 20c82e6f518..eeebe8cbcdc 100644 --- a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java +++ b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java @@ -257,7 +257,7 @@ public class RandomShapeGenerator extends RandomGeoGenerator { if (nearP == null) nearP = xRandomPointIn(r, bounds); - if (small == true) { + if (small) { // between 3 and 6 degrees final double latRange = 3 * r.nextDouble() + 3; final double lonRange = 3 * r.nextDouble() + 3; From 279baa0284b17126117f666463cb19e9e5d216d2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 13 Oct 2016 13:21:57 -0400 Subject: [PATCH 42/53] Add a flush to test in _cat/indices.asciidoc We test that sorting by `store.size` works but sometimes the sizes aren't what we expect. 
At least in CI: https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-unix-compatibility/os=opensuse/101/console https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+5.x+multijob-unix-compatibility/os=centos/100/console I haven't been able to reproduce it locally but adding a `_flush` won't hurt and might make the inconsistency vanish. --- docs/reference/cat/indices.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 91965f7b6fc..11dfb3c7c6a 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -67,6 +67,7 @@ GET /_cat/indices?v&s=store.size:desc -------------------------------------------------- // CONSOLE // TEST[continued] +// TEST[s/^/POST _flush\n/] Which looks like: From feefb71007c3777ddb50c003372a878c3e1c3b12 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 13 Oct 2016 20:21:49 +0200 Subject: [PATCH 43/53] Renamed v5.0.0-rc2 to 5.0.0 --- core/src/main/java/org/elasticsearch/Version.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 9002328591c..9010d24e376 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -89,8 +89,8 @@ public class Version { public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_5_0_0_rc1_ID = 5000051; public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_0_rc2_ID = 5000052; - public static final Version V_5_0_0_rc2 = new Version(V_5_0_0_rc2_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); + public static final int V_5_0_0_ID = 5000052; + public static final Version V_5_0_0 = new 
Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final Version CURRENT = V_6_0_0_alpha1; @@ -117,8 +117,8 @@ public class Version { switch (id) { case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; - case V_5_0_0_rc2_ID: - return V_5_0_0_rc2; + case V_5_0_0_ID: + return V_5_0_0; case V_5_0_0_rc1_ID: return V_5_0_0_rc1; case V_5_0_0_beta1_ID: From e739ecf67c39b572814cf59e037d993d3910860b Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 13 Oct 2016 20:42:43 +0200 Subject: [PATCH 44/53] Fix version constant for 5.0.0 --- core/src/main/java/org/elasticsearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 9010d24e376..47cd03d1217 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -89,7 +89,7 @@ public class Version { public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_5_0_0_rc1_ID = 5000051; public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_0_ID = 5000052; + public static final int V_5_0_0_ID = 5000099; public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); From b2c8da25fd02bb5bb568cfb5f907bf2d71f5e3e0 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 13 Oct 2016 22:13:18 +0200 Subject: [PATCH 45/53] Remove unreleased version constant 
Some people apparently never run tests when they change this file. Neither do they read comments right below the line they change that they should do the change after all. --- core/src/main/java/org/elasticsearch/Version.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 47cd03d1217..f243bcdbdd8 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -89,8 +89,6 @@ public class Version { public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_5_0_0_rc1_ID = 5000051; public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_0_ID = 5000099; - public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final Version CURRENT = V_6_0_0_alpha1; @@ -117,8 +115,6 @@ public class Version { switch (id) { case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; - case V_5_0_0_ID: - return V_5_0_0; case V_5_0_0_rc1_ID: return V_5_0_0_rc1; case V_5_0_0_beta1_ID: From 68ed18338188f99a34b86609c36ef1633151ad2a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 13 Oct 2016 16:42:21 -0400 Subject: [PATCH 46/53] CONSOLEify a few more _cat docs `_cat/master`, `_cat/nodeattrs`, `_cat/nodes`. 
--- docs/build.gradle | 3 -- docs/reference/cat/master.asciidoc | 19 ++++++--- docs/reference/cat/nodeattrs.asciidoc | 49 +++++++++++---------- docs/reference/cat/nodes.asciidoc | 61 ++++++++++++--------------- 4 files changed, 66 insertions(+), 66 deletions(-) diff --git a/docs/build.gradle b/docs/build.gradle index 5a65065a639..3286648da96 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -93,9 +93,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/analysis/tokenfilters/stop-tokenfilter.asciidoc', 'reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc', 'reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc', - 'reference/cat/master.asciidoc', - 'reference/cat/nodeattrs.asciidoc', - 'reference/cat/nodes.asciidoc', 'reference/cat/pending_tasks.asciidoc', 'reference/cat/plugins.asciidoc', 'reference/cat/recovery.asciidoc', diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc index caed564d7b5..cf203a3eee0 100644 --- a/docs/reference/cat/master.asciidoc +++ b/docs/reference/cat/master.asciidoc @@ -2,14 +2,22 @@ == cat master `master` doesn't have any extra options. It simply displays the -master's node ID, bound IP address, and node name. +master's node ID, bound IP address, and node name. 
For example: -[source,sh] +[source,js] -------------------------------------------------- -% curl 'localhost:9200/_cat/master?v' -id ip node -Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA +GET /_cat/master?v -------------------------------------------------- +// CONSOLE + +might respond: + +[source,js] +-------------------------------------------------- +id host ip node +YzWoH_2BT-6UjVGDyPdqYg 127.0.0.1 127.0.0.1 YzWoH_2 +-------------------------------------------------- +// TESTRESPONSE[s/YzWoH_2.+/.+/ _cat] This information is also available via the `nodes` command, but this is slightly shorter when all you want to do, for example, is verify @@ -25,3 +33,4 @@ Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA [3] 19:16:37 [SUCCESS] es1.vm Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA -------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc index 1677583a709..a7cbecb50db 100644 --- a/docs/reference/cat/nodeattrs.asciidoc +++ b/docs/reference/cat/nodeattrs.asciidoc @@ -2,34 +2,26 @@ == cat nodeattrs The `nodeattrs` command shows custom node attributes. +For example: -["source","sh",subs="attributes,callouts"] +[source,js] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/nodeattrs -node host ip attr value -DKDM97B epsilon 192.168.1.8 rack rack314 -DKDM97B epsilon 192.168.1.8 azone us-east-1 +GET /_cat/nodeattrs?v -------------------------------------------------- +// CONSOLE -The first few columns give you basic info per node. 
+Could look like: - -["source","sh",subs="attributes,callouts"] +[source,js] -------------------------------------------------- -node host ip -DKDM97B epsilon 192.168.1.8 -DKDM97B epsilon 192.168.1.8 +node host ip attr value +EK_AsJb 127.0.0.1 127.0.0.1 testattr test -------------------------------------------------- +// TESTRESPONSE[s/EK_AsJb/.+/ _cat] - -The attr and value columns can give you a picture of custom node attributes. - -[source,sh] --------------------------------------------------- -attr value -rack rack314 -azone us-east-1 --------------------------------------------------- +The first few columns (`node`, `host`, `ip`) give you basic info per node +and the `attr` and `value` columns give you the custom node attributes, +one per line. [float] === Columns @@ -49,13 +41,20 @@ by default. To have the headers appear in the output, use verbose mode (`v`). The header name will match the supplied value (e.g., `pid` versus `p`). For example: -["source","sh",subs="attributes,callouts"] +[source,js] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/nodeattrs?v&h=name,pid,attr,value -name pid attr value -DKDM97B 28000 rack rack314 -DKDM97B 28000 azone us-east-1 +GET /_cat/nodeattrs?v&h=name,pid,attr,value -------------------------------------------------- +// CONSOLE + +Might look like: + +[source,js] +-------------------------------------------------- +name pid attr value +EK_AsJb 19566 testattr test +-------------------------------------------------- +// TESTRESPONSE[s/EK_AsJb/.+/ s/19566/\\d*/ _cat] [cols="<,<,<,<,<",options="header",subs="normal"] |======================================================================= diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index b0b152d4c50..8885e490fca 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -1,38 +1,31 @@ [[cat-nodes]] == cat nodes -The `nodes` command shows the cluster topology. 
+The `nodes` command shows the cluster topology. For example -[source,sh] +[source,js] -------------------------------------------------- -% GET /_cat/nodes -192.168.56.30 9 78 22 1.80 2.05 2.51 mdi * bGG90GE -192.168.56.10 6 75 14 1.24 2.45 1.37 md - I8hydUG -192.168.56.20 5 71 12 1.07 1.05 1.11 di - H5dfFeA +GET /_cat/nodes?v -------------------------------------------------- +// CONSOLE -The first few columns tell you where your nodes live and give -a picture of your heap, memory, cpu and load. +Might look like: -[source,sh] +[source,js] -------------------------------------------------- -ip heap.percent ram.percent cpu load_1m load_5m load_15m -192.168.56.30 9 78 22 1.80 2.05 2.51 -192.168.56.10 6 75 14 1.24 2.45 1.37 -192.168.56.20 5 71 12 1.07 1.05 1.11 +ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name +127.0.0.1 65 99 42 3.07 mdi * mJw06l1 -------------------------------------------------- +// TESTRESPONSE[s/3.07/(\\d+\\.\\d+( \\d+\\.\\d+ (\\d+\\.\\d+)?)?)?/] +// TESTRESPONSE[s/65 99 42/\\d+ \\d+ \\d+/] +// TESTRESPONSE[s/[*]/[*]/ s/mJw06l1/.+/ _cat] -The last columns provide ancillary information that can often be -useful when looking at the cluster as a whole, particularly large -ones. How many master-eligible nodes do I have? +The first few columns (`ip, `heap.percent`, `ram.percent`, `cpu, `load_*`) tell +you where your nodes live and give a quick picture of performance stats. -[source,sh] --------------------------------------------------- -node.role master name -mdi * bGG90GE -md - I8hydUG -di - H5dfFeA --------------------------------------------------- +The last (`node.role`, `master`, and `name`) columns provide ancillary +information that can often be useful when looking at the cluster as a whole, +particularly large ones. How many master-eligible nodes do I have? [float] === Columns @@ -52,18 +45,20 @@ by default. To have the headers appear in the output, use verbose mode (`v`). 
The header name will match the supplied value (e.g., `pid` versus `p`). For example: -[source,sh] +[source,js] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/nodes?v&h=id,ip,port,v,m -id ip port v m -pLSN 192.168.56.30 9300 {version} - -k0zy 192.168.56.10 9300 {version} - -6Tyi 192.168.56.20 9300 {version} * -% curl 192.168.56.10:9200/_cat/nodes?h=id,ip,port,v,m -pLSN 192.168.56.30 9300 {version} - -k0zy 192.168.56.10 9300 {version} - -6Tyi 192.168.56.20 9300 {version} * +GET /_cat/nodes?v&h=id,ip,port,v,m -------------------------------------------------- +// CONSOLE + +Might look like: + +["source","js",subs="attributes,callouts"] +-------------------------------------------------- +id ip port v m +veJR 127.0.0.1 59938 {version} * +-------------------------------------------------- +// TESTRESPONSE[s/veJR/.+/ s/59938/\\d+/ s/[*]/[*]/ _cat] [cols="<,<,<,<,<",options="header",subs="normal"] |======================================================================= From f5e1c6d330dd6fb741f24260ad65570f54947733 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 14 Oct 2016 09:26:33 +0200 Subject: [PATCH 47/53] Update Delete/Update-By-Query REST Specs (#20915) This commit removes unused parameters from the Update-By-Query and Delete-By-Query REST specification files. 
--- .../rest-api-spec/api/delete_by_query.json | 36 +--------------- .../rest-api-spec/api/update_by_query.json | 42 +------------------ 2 files changed, 3 insertions(+), 75 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index 8c2b257fcbf..1d98a35daa1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -36,18 +36,6 @@ "type" : "string", "description" : "The field to use as default where no field prefix is given in the query string" }, - "explain": { - "type" : "boolean", - "description" : "Specify whether to return detailed information about score computation as part of a hit" - }, - "stored_fields": { - "type" : "list", - "description" : "A comma-separated list of stored fields to return as part of a hit" - }, - "docvalue_fields": { - "type" : "list", - "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit" - }, "from": { "type" : "number", "description" : "Starting offset (default: 0)" @@ -134,32 +122,10 @@ "type" : "list", "description" : "Specific 'tag' of the request for logging and statistical purposes" }, - "suggest_field": { - "type" : "string", - "description" : "Specify which field to use for suggestions" - }, - "suggest_mode": { - "type" : "enum", - "options" : ["missing", "popular", "always"], - "default" : "missing", - "description" : "Specify suggest mode" - }, - "suggest_size": { - "type" : "number", - "description" : "How many suggestions to return in response" - }, - "suggest_text": { - "type" : "string", - "description" : "The source text for which the suggestions should be returned" - }, "timeout": { "type" : "time", "description" : "Explicit operation timeout" }, - "track_scores": { - "type" : "boolean", - "description": "Whether to calculate and 
return scores even if they are not used for sorting" - }, "version": { "type" : "boolean", "description" : "Specify whether to return document version as part of a hit" @@ -194,7 +160,7 @@ "requests_per_second": { "type": "number", "default": 0, - "description": "The throttle for this request in sub-requests per second. -1 means set no throttle." + "description": "The throttle to set on this request in sub-requests per second. -1 means set no throttle as does \"unlimited\" which is the only non-float this accepts." } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 3974eaae88d..1cc89f1472d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -36,22 +36,6 @@ "type" : "string", "description" : "The field to use as default where no field prefix is given in the query string" }, - "explain": { - "type" : "boolean", - "description" : "Specify whether to return detailed information about score computation as part of a hit" - }, - "stored_fields": { - "type" : "list", - "description" : "A comma-separated list of stored fields to return as part of a hit" - }, - "docvalue_fields": { - "type" : "list", - "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit" - }, - "fielddata_fields": { - "type" : "list", - "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit" - }, "from": { "type" : "number", "description" : "Starting offset (default: 0)" @@ -69,7 +53,7 @@ "type" : "enum", "options": ["abort", "proceed"], "default": "abort", - "description" : "What to do when the reindex hits version conflicts?" + "description" : "What to do when the update by query hits version conflicts?" 
}, "expand_wildcards": { "type" : "enum", @@ -142,32 +126,10 @@ "type" : "list", "description" : "Specific 'tag' of the request for logging and statistical purposes" }, - "suggest_field": { - "type" : "string", - "description" : "Specify which field to use for suggestions" - }, - "suggest_mode": { - "type" : "enum", - "options" : ["missing", "popular", "always"], - "default" : "missing", - "description" : "Specify suggest mode" - }, - "suggest_size": { - "type" : "number", - "description" : "How many suggestions to return in response" - }, - "suggest_text": { - "type" : "string", - "description" : "The source text for which the suggestions should be returned" - }, "timeout": { "type" : "time", "description" : "Explicit operation timeout" }, - "track_scores": { - "type" : "boolean", - "description": "Whether to calculate and return scores even if they are not used for sorting" - }, "version": { "type" : "boolean", "description" : "Specify whether to return document version as part of a hit" @@ -201,7 +163,7 @@ "wait_for_completion": { "type" : "boolean", "default": false, - "description" : "Should the request should block until the reindex is complete." + "description" : "Should the request should block until the update by query operation is complete." 
}, "requests_per_second": { "type": "number", From 7f7e99e10b1f4588b40bb43fd9dc8ebb38573b23 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 14 Oct 2016 10:05:23 +0200 Subject: [PATCH 48/53] [TEST] Add basic tests for ExplainRequest and ShardValidationQueryRequest --- .../query/ShardValidateQueryRequest.java | 6 +- .../action/ExplainRequestTests.java | 77 ++++++++++++++++++ .../ShardValidateQueryRequestTests.java | 78 +++++++++++++++++++ 3 files changed, 158 insertions(+), 3 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java create mode 100644 core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java index 831ef6e1060..bf0220911fd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java @@ -47,7 +47,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { } - ShardValidateQueryRequest(ShardId shardId, @Nullable String[] filteringAliases, ValidateQueryRequest request) { + public ShardValidateQueryRequest(ShardId shardId, @Nullable String[] filteringAliases, ValidateQueryRequest request) { super(shardId, request); this.query = request.query(); this.types = request.types(); @@ -69,8 +69,8 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { return this.explain; } - public boolean rewrite() { - return this.rewrite; + public boolean rewrite() { + return this.rewrite; } public String[] filteringAliases() { diff --git a/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java new file 
mode 100644 index 00000000000..5e7040a707a --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action; + +import org.elasticsearch.action.explain.ExplainRequest; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchRequestParsers; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class ExplainRequestTests extends ESTestCase { + + protected NamedWriteableRegistry namedWriteableRegistry; + protected SearchRequestParsers searchRequestParsers; + public void setUp() throws Exception 
{ + super.setUp(); + IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + List entries = new ArrayList<>(); + entries.addAll(indicesModule.getNamedWriteables()); + entries.addAll(searchModule.getNamedWriteables()); + namedWriteableRegistry = new NamedWriteableRegistry(entries); + searchRequestParsers = searchModule.getSearchRequestParsers(); + } + + + public void testSerialize() throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + ExplainRequest request = new ExplainRequest("index", "type", "id"); + request.fetchSourceContext(new FetchSourceContext(true, new String[]{"field1.*"}, new String[] {"field2.*"})); + request.filteringAlias(new String[] {"alias0", "alias1"}); + request.preference("the_preference"); + request.query(QueryBuilders.termQuery("field", "value")); + request.storedFields(new String[] {"field1", "field2"}); + request.routing("some_routing"); + request.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + ExplainRequest readRequest = new ExplainRequest(); + readRequest.readFrom(in); + assertArrayEquals(request.filteringAlias(), readRequest.filteringAlias()); + assertArrayEquals(request.storedFields(), readRequest.storedFields()); + assertEquals(request.preference(), readRequest.preference()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.routing(), readRequest.routing()); + assertEquals(request.fetchSourceContext(), readRequest.fetchSourceContext()); + } + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java new file mode 100644 index 00000000000..5d75722a13d --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java @@ 
-0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action; + +import org.elasticsearch.action.admin.indices.validate.query.ShardValidateQueryRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchRequestParsers; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class ShardValidateQueryRequestTests extends ESTestCase { + + protected NamedWriteableRegistry namedWriteableRegistry; + protected SearchRequestParsers searchRequestParsers; + public void setUp() throws Exception { + super.setUp(); + 
IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + List entries = new ArrayList<>(); + entries.addAll(indicesModule.getNamedWriteables()); + entries.addAll(searchModule.getNamedWriteables()); + namedWriteableRegistry = new NamedWriteableRegistry(entries); + searchRequestParsers = searchModule.getSearchRequestParsers(); + } + + + public void testSerialize() throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices"); + validateQueryRequest.query(QueryBuilders.termQuery("field", "value")); + validateQueryRequest.rewrite(true); + validateQueryRequest.explain(false); + validateQueryRequest.types("type1", "type2"); + ShardValidateQueryRequest request = new ShardValidateQueryRequest(new ShardId("index", "foobar", 1), + new String[] {"alias0", "alias1"}, validateQueryRequest); + request.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); + readRequest.readFrom(in); + assertArrayEquals(request.filteringAliases(), readRequest.filteringAliases()); + assertArrayEquals(request.types(), readRequest.types()); + assertEquals(request.explain(), readRequest.explain()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.rewrite(), readRequest.rewrite()); + assertEquals(request.shardId(), readRequest.shardId()); + } + } + } +} From 595ec8c94861eaedce8c210547109062ba0736f5 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 14 Oct 2016 05:47:26 -0400 Subject: [PATCH 49/53] Remove artificial default processors limit Today Elasticsearch limits the number of processors used in computing thread counts to 32. 
This was from a time when Elasticsearch created more threads than it does now and users would run into out of memory errors. It appears the real cause of these out of memory errors was not well understood (it's often due to ulimit settings) and so users were left hitting these out of memory errors on boxes with high core counts. Today Elasticsearch creates fewer threads (but still a lot) and we have a bootstrap check in place to ensure that the relevant ulimit is not too low. There are some caveats still to having too many concurrent indexing threads as it can lead to too many little segments, and it's not a magical go faster knob if indexing is already bottlenecked by disk, but this limitation is artificial and surprising to users and so it should be removed. This commit also increases the lower bound of the max processes ulimit, to prepare for a world where Elasticsearch instances might be running with more than the previous cap of 32 processors. With the current settings, Elasticsearch wants to create roughly 576 + 25 * p / 2 threads, where p is the number of processors. Add in roughly 7 * p / 8 threads for the GC threads and a fudge factor, and 4096 should cover us pretty well up to 256 cores.
Relates #20874 --- .../action/search/TransportMultiSearchAction.java | 2 +- .../elasticsearch/bootstrap/BootstrapCheck.java | 3 ++- .../common/util/PageCacheRecycler.java | 2 +- .../common/util/concurrent/EsExecutors.java | 15 ++++++++------- .../elasticsearch/index/MergeSchedulerConfig.java | 2 +- .../org/elasticsearch/monitor/os/OsService.java | 2 +- .../threadpool/FixedExecutorBuilder.java | 3 +-- .../org/elasticsearch/threadpool/ThreadPool.java | 2 +- .../threadpool/FixedThreadPoolTests.java | 2 +- .../threadpool/UpdateThreadPoolSettingsTests.java | 6 +++--- .../http/netty3/Netty3HttpServerTransport.java | 2 +- .../transport/netty3/Netty3Transport.java | 2 +- .../http/netty4/Netty4HttpServerTransport.java | 2 +- .../transport/netty4/Netty4Transport.java | 2 +- 14 files changed, 24 insertions(+), 23 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index efd04035276..2bceccce385 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -51,7 +51,7 @@ public class TransportMultiSearchAction extends HandledTransportAction PROCESSORS_SETTING = - Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, Property.NodeScope); + Setting.intSetting("processors", Runtime.getRuntime().availableProcessors(), 1, Property.NodeScope); /** - * Returns the number of processors available but at most 32. + * Returns the number of available processors. Defaults to + * {@link Runtime#availableProcessors()} but can be overridden by passing a {@link Settings} + * instance with the key "processors" set to the desired value. 
+ * + * @param settings a {@link Settings} instance from which to derive the available processors + * @return the number of available processors */ - public static int boundedNumberOfProcessors(Settings settings) { - /* This relates to issues where machines with large number of cores - * ie. >= 48 create too many threads and run into OOM see #3478 - * We just use an 32 core upper-bound here to not stress the system - * too much with too many created threads */ + public static int numberOfProcessors(final Settings settings) { return PROCESSORS_SETTING.get(settings); } diff --git a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java index e04d3dc7a49..3707d9259b1 100644 --- a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java +++ b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java @@ -54,7 +54,7 @@ public final class MergeSchedulerConfig { public static final Setting MAX_THREAD_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_thread_count", - (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), + (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.numberOfProcessors(s) / 2))), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), Property.Dynamic, Property.IndexScope); public static final Setting MAX_MERGE_COUNT_SETTING = diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java index cb67eef852c..f37daddbb06 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java @@ -41,7 +41,7 @@ public class OsService extends AbstractComponent { super(settings); this.probe = OsProbe.getInstance(); TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings); - this.info = 
probe.osInfo(refreshInterval.millis(), EsExecutors.boundedNumberOfProcessors(settings)); + this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.numberOfProcessors(settings)); this.osStatsCache = new OsStatsCache(refreshInterval, probe.osStats()); logger.debug("using refresh_interval [{}]", refreshInterval); } diff --git a/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java b/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java index de7dbbaefc9..9e5469fd16a 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java +++ b/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.SizeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.node.Node; @@ -79,7 +78,7 @@ public final class FixedExecutorBuilder extends ExecutorBuilder builders = new HashMap<>(); - final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); + final int availableProcessors = EsExecutors.numberOfProcessors(settings); final int halfProcMaxAt5 = halfNumberOfProcessorsMaxFive(availableProcessors); final int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors); final int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512); diff --git a/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java b/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java index 48ea8b6c8c9..5ec0f30f520 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java @@ -33,7 +33,7 @@ public class FixedThreadPoolTests 
extends ESThreadPoolTestCase { final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED); // some of the fixed thread pool are bound by the number of // cores so we can not exceed that - final int size = randomIntBetween(1, EsExecutors.boundedNumberOfProcessors(Settings.EMPTY)); + final int size = randomIntBetween(1, EsExecutors.numberOfProcessors(Settings.EMPTY)); final int queueSize = randomIntBetween(1, 16); final long rejections = randomIntBetween(1, 16); diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index 87accf057ad..29053400931 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -62,7 +62,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { public void testIndexingThreadPoolsMaxSize() throws InterruptedException { final String name = randomFrom(Names.BULK, Names.INDEX); - final int maxSize = 1 + EsExecutors.boundedNumberOfProcessors(Settings.EMPTY); + final int maxSize = 1 + EsExecutors.numberOfProcessors(Settings.EMPTY); final int tooBig = randomIntBetween(1 + maxSize, Integer.MAX_VALUE); // try to create a too big thread pool @@ -89,7 +89,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { private static int getExpectedThreadPoolSize(Settings settings, String name, int size) { if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) { - return Math.min(size, EsExecutors.boundedNumberOfProcessors(settings)); + return Math.min(size, EsExecutors.numberOfProcessors(settings)); } else { return size; } @@ -185,7 +185,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { new ScalingExecutorBuilder( "my_pool1", 1, - EsExecutors.boundedNumberOfProcessors(Settings.EMPTY), + 
EsExecutors.numberOfProcessors(Settings.EMPTY), TimeValue.timeValueMinutes(1)); final FixedExecutorBuilder fixed = new FixedExecutorBuilder(Settings.EMPTY, "my_pool2", 1, 1); diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java index 114d7c6b303..c19cbbb7c57 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java @@ -125,7 +125,7 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope, Property.Shared); public static final Setting SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count", - (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), Property.NodeScope, Property.Shared); public static final Setting SETTING_HTTP_TCP_NO_DELAY = diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java index eb8d14b08fc..9d71fec9c90 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java @@ -92,7 +92,7 @@ public class Netty3Transport extends TcpTransport { public static final Setting WORKER_COUNT = new Setting<>("transport.netty.worker_count", - (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> 
Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope, Property.Shared); public static final Setting NETTY_MAX_CUMULATION_BUFFER_CAPACITY = diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 0f1fe2a9059..20cdfe0a128 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -127,7 +127,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope, Property.Shared); public static final Setting SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count", - (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), Property.NodeScope, Property.Shared); public static final Setting SETTING_HTTP_TCP_NO_DELAY = diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 3787b29ab63..77429788317 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -95,7 +95,7 @@ public class Netty4Transport extends TcpTransport { public static final Setting WORKER_COUNT = new Setting<>("transport.netty.worker_count", - (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, 
"transport.netty.worker_count"), Property.NodeScope, Property.Shared); public static final Setting NETTY_MAX_CUMULATION_BUFFER_CAPACITY = From aabbbc2202f225477aa60bda89f4088f679ff820 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 14 Oct 2016 13:21:39 +0200 Subject: [PATCH 50/53] Remove duplicate timeout parameter in Delete/Update-By-Query REST Specs (#20934) This commit removes the duplicated "timeout" parameter introduced in #20915 --- .../src/main/resources/rest-api-spec/api/delete_by_query.json | 4 ---- .../src/main/resources/rest-api-spec/api/update_by_query.json | 4 ---- 2 files changed, 8 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index 1d98a35daa1..f97492aa7ab 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -122,10 +122,6 @@ "type" : "list", "description" : "Specific 'tag' of the request for logging and statistical purposes" }, - "timeout": { - "type" : "time", - "description" : "Explicit operation timeout" - }, "version": { "type" : "boolean", "description" : "Specify whether to return document version as part of a hit" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 1cc89f1472d..4b9e76ac59f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -126,10 +126,6 @@ "type" : "list", "description" : "Specific 'tag' of the request for logging and statistical purposes" }, - "timeout": { - "type" : "time", - "description" : "Explicit operation timeout" - }, "version": { "type" : "boolean", "description" : "Specify whether to return document version as part of a hit" From 
ddced5df1a924a90a3d8d549479941087859e111 Mon Sep 17 00:00:00 2001 From: Jun Ohtani Date: Fri, 14 Oct 2016 17:00:06 +0900 Subject: [PATCH 51/53] IndexSettings should not be Null in Mapper.BuildContext Rename method name Change validation Closes #20174 --- .../java/org/elasticsearch/index/mapper/Mapper.java | 2 +- .../org/elasticsearch/index/mapper/MapperTests.java | 13 ++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index d45283a9416..1c54c2136c9 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -39,7 +39,7 @@ public abstract class Mapper implements ToXContent, Iterable { private final ContentPath contentPath; public BuilderContext(Settings indexSettings, ContentPath contentPath) { - assert indexSettings != null; + Objects.requireNonNull(indexSettings, "indexSettings is required"); this.contentPath = contentPath; this.indexSettings = indexSettings; } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java index b5979db094c..f4e83dde46a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java @@ -24,20 +24,19 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; -import static org.hamcrest.Matchers.equalTo; - public class MapperTests extends ESTestCase { - public void testBuilderContextWithIndexSettings() { + public void testSuccessfulBuilderContext() { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); - Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new 
ContentPath(1)); + ContentPath contentPath = new ContentPath(1); + Mapper.BuilderContext context = new Mapper.BuilderContext(settings, contentPath); - assertNotNull(context.indexSettings()); - assertThat(context.indexSettings(), equalTo(settings)); + assertEquals(settings, context.indexSettings()); + assertEquals(contentPath, context.path()); } public void testBuilderContextWithIndexSettingsAsNull() { - AssertionError e = expectThrows(AssertionError.class, () -> new Mapper.BuilderContext(null, new ContentPath(1))); + NullPointerException e = expectThrows(NullPointerException.class, () -> new Mapper.BuilderContext(null, new ContentPath(1))); } From cff5993318620863b0b7cbc494178ec5dc4faa0c Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 14 Oct 2016 16:26:09 +0200 Subject: [PATCH 52/53] Parse alias filters on the coordinating node (#20916) Today we don't parse alias filters on the coordinating node, we only forward the alias patters to executing node and resolve it late. This has several problems like requests that go through filtered aliases are never cached if they use date math, since the parsing happens very late in the process even without rewriting. It also used to be processed on every shard while we can only do it once per index on the coordinating node. Another nice side-effect is that we are never prone to cluster-state updates that change an alias, all nodes will execute the exact same alias filter since they are process based on the same cluster state. 
--- .../main/java/org/elasticsearch/Version.java | 2 +- .../query/ShardValidateQueryRequest.java | 34 +--- .../query/TransportValidateQueryAction.java | 10 +- .../action/explain/ExplainRequest.java | 11 +- .../explain/TransportExplainAction.java | 7 +- .../search/AbstractSearchAsyncAction.java | 15 +- .../SearchDfsQueryAndFetchAsyncAction.java | 5 +- .../SearchDfsQueryThenFetchAsyncAction.java | 5 +- .../SearchQueryAndFetchAsyncAction.java | 7 +- .../SearchQueryThenFetchAsyncAction.java | 6 +- .../action/search/TransportSearchAction.java | 37 +++-- .../action/search/TransportSearchHelper.java | 5 - .../broadcast/TransportBroadcastAction.java | 5 +- .../shard/TransportSingleShardAction.java | 3 +- .../common/bytes/BytesArray.java | 6 - .../org/elasticsearch/index/IndexService.java | 69 -------- .../elasticsearch/indices/IndicesService.java | 13 ++ .../search/DefaultSearchContext.java | 9 +- .../elasticsearch/search/SearchService.java | 44 +++-- .../search/internal/AliasFilter.java | 121 ++++++++++++++ .../internal/ShardSearchLocalRequest.java | 24 +-- .../search/internal/ShardSearchRequest.java | 75 ++++++++- .../internal/ShardSearchTransportRequest.java | 7 +- .../java/org/elasticsearch/VersionTests.java | 6 +- .../action/ExplainRequestTests.java | 39 ++++- .../ShardValidateQueryRequestTests.java | 40 ++++- .../index/IndexServiceTests.java | 76 --------- .../index/SearchSlowLogTests.java | 5 +- .../indices/IndicesRequestCacheIT.java | 7 +- .../search/AbstractSearchTestCase.java | 3 + .../search/SearchServiceTests.java | 4 +- .../ShardSearchTransportRequestTests.java | 155 +++++++++++++++++- .../rest-api-spec/test/explain/10_basic.yaml | 4 +- .../test/indices.validate_query/10_basic.yaml | 12 ++ 34 files changed, 596 insertions(+), 275 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 
f243bcdbdd8..0d8c3b72672 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -97,7 +97,7 @@ public class Version { * If you need a version that doesn't exist here for instance V_5_1_0 then go and create such a version * as a constant where you need it: *
    -     *   public static final Version V_5_1_0_UNRELEASED = new Version(5010099, Version.CURRENT.luceneVersion);
    +     *   public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);
          * 
    * Then go to VersionsTest.java and add a test for this constant VersionTests#testUnknownVersions(). * This is particularly useful if you are building a feature that needs a BWC layer for this unreleased version etc.*/ diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java index bf0220911fd..2ccf2f1bd3e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java @@ -20,14 +20,15 @@ package org.elasticsearch.action.admin.indices.validate.query; import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.internal.AliasFilter; import java.io.IOException; +import java.util.Objects; /** * Internal validate request executed directly against a specific index shard. 
@@ -39,21 +40,18 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { private boolean explain; private boolean rewrite; private long nowInMillis; - - @Nullable - private String[] filteringAliases; + private AliasFilter filteringAliases; public ShardValidateQueryRequest() { - } - public ShardValidateQueryRequest(ShardId shardId, @Nullable String[] filteringAliases, ValidateQueryRequest request) { + public ShardValidateQueryRequest(ShardId shardId, AliasFilter filteringAliases, ValidateQueryRequest request) { super(shardId, request); this.query = request.query(); this.types = request.types(); this.explain = request.explain(); this.rewrite = request.rewrite(); - this.filteringAliases = filteringAliases; + this.filteringAliases = Objects.requireNonNull(filteringAliases, "filteringAliases must not be null"); this.nowInMillis = request.nowInMillis; } @@ -73,7 +71,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { return this.rewrite; } - public String[] filteringAliases() { + public AliasFilter filteringAliases() { return filteringAliases; } @@ -93,14 +91,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { types[i] = in.readString(); } } - int aliasesSize = in.readVInt(); - if (aliasesSize > 0) { - filteringAliases = new String[aliasesSize]; - for (int i = 0; i < aliasesSize; i++) { - filteringAliases[i] = in.readString(); - } - } - + filteringAliases = new AliasFilter(in); explain = in.readBoolean(); rewrite = in.readBoolean(); nowInMillis = in.readVLong(); @@ -110,20 +101,11 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeNamedWriteable(query); - out.writeVInt(types.length); for (String type : types) { out.writeString(type); } - if (filteringAliases != null) { - out.writeVInt(filteringAliases.length); - for (String alias : filteringAliases) { - out.writeString(alias); - } - } else { - 
out.writeVInt(0); - } - + filteringAliases.writeTo(out); out.writeBoolean(explain); out.writeBoolean(rewrite); out.writeVLong(nowInMillis); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 25ced69f03a..b80b721149c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.action.ActionListener; @@ -43,6 +42,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.tasks.Task; @@ -77,8 +77,9 @@ public class TransportValidateQueryAction extends TransportBroadcastAction { private String[] storedFields; private FetchSourceContext fetchSourceContext; - private String[] filteringAlias = Strings.EMPTY_ARRAY; + private AliasFilter filteringAlias = new AliasFilter(null, Strings.EMPTY_ARRAY); long nowInMillis; @@ -131,11 +132,11 @@ public class ExplainRequest extends SingleShardRequest { return this; } - public String[] filteringAlias() { + public AliasFilter filteringAlias() { return filteringAlias; } - public ExplainRequest filteringAlias(String[] filteringAlias) { + public ExplainRequest filteringAlias(AliasFilter filteringAlias) { 
if (filteringAlias != null) { this.filteringAlias = filteringAlias; } @@ -166,7 +167,7 @@ public class ExplainRequest extends SingleShardRequest { routing = in.readOptionalString(); preference = in.readOptionalString(); query = in.readNamedWriteable(QueryBuilder.class); - filteringAlias = in.readStringArray(); + filteringAlias = new AliasFilter(in); storedFields = in.readOptionalStringArray(); fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); nowInMillis = in.readVLong(); @@ -180,7 +181,7 @@ public class ExplainRequest extends SingleShardRequest { out.writeOptionalString(routing); out.writeOptionalString(preference); out.writeNamedWriteable(query); - out.writeStringArray(filteringAlias); + filteringAlias.writeTo(out); out.writeOptionalStringArray(storedFields); out.writeOptionalWriteable(fetchSourceContext); out.writeVLong(nowInMillis); diff --git a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index dde4e3f42ad..65176c1df39 100644 --- a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.search.rescore.RescoreSearchContext; @@ -78,7 +79,9 @@ public class TransportExplainAction extends TransportSingleShardAction extends AbstractAsyncAction { @@ -64,7 +64,7 @@ abstract class AbstractSearchAsyncAction protected final AtomicInteger successfulOps = new AtomicInteger(); private final AtomicInteger totalOps = new 
AtomicInteger(); protected final AtomicArray firstResults; - private final Map perIndexFilteringAliases; + private final Map aliasFilter; private final long clusterStateVersion; private volatile AtomicArray shardFailures; private final Object shardFailuresMutex = new Object(); @@ -72,7 +72,7 @@ abstract class AbstractSearchAsyncAction protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, Function nodeIdToDiscoveryNode, - Map perIndexFilteringAliases, Executor executor, SearchRequest request, + Map aliasFilter, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { super(startTime); @@ -81,7 +81,6 @@ abstract class AbstractSearchAsyncAction this.executor = executor; this.request = request; this.listener = listener; - this.perIndexFilteringAliases = perIndexFilteringAliases; this.nodeIdToDiscoveryNode = nodeIdToDiscoveryNode; this.clusterStateVersion = clusterStateVersion; this.shardsIts = shardsIts; @@ -89,6 +88,7 @@ abstract class AbstractSearchAsyncAction // we need to add 1 for non active partition, since we count it in the total! 
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); firstResults = new AtomicArray<>(shardsIts.size()); + this.aliasFilter = aliasFilter; } @@ -123,9 +123,10 @@ abstract class AbstractSearchAsyncAction if (node == null) { onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); } else { - String[] filteringAliases = perIndexFilteringAliases.get(shard.index().getName()); - sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, - startTime()), new ActionListener() { + AliasFilter filter = this.aliasFilter.get(shard.index().getName()); + ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shard, shardsIts.size(), + filter, startTime()); + sendExecuteFirstPhase(node, transportRequest , new ActionListener() { @Override public void onResponse(FirstResult result) { onFirstPhaseResult(shardIndex, shard, result, shardIt); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index 171a97947b0..24b1033ca5f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; @@ -46,10 +47,10 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction nodeIdToDiscoveryNode, 
- Map perIndexFilteringAliases, SearchPhaseController searchPhaseController, + Map aliasFilter, SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, perIndexFilteringAliases, executor, + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, request, listener, shardsIts, startTime, clusterStateVersion); this.searchPhaseController = searchPhaseController; queryFetchResults = new AtomicArray<>(firstResults.length()); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index a0a5035335f..1af6d4da4d1 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -34,6 +34,7 @@ import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; @@ -54,10 +55,10 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction nodeIdToDiscoveryNode, - Map perIndexFilteringAliases, SearchPhaseController searchPhaseController, + Map aliasFilter, SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { - super(logger, 
searchTransportService, nodeIdToDiscoveryNode, perIndexFilteringAliases, executor, + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, request, listener, shardsIts, startTime, clusterStateVersion); this.searchPhaseController = searchPhaseController; queryResults = new AtomicArray<>(firstResults.length()); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index 31372838142..4e8c3847ffc 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; @@ -38,13 +39,15 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction nodeIdToDiscoveryNode, Map perIndexFilteringAliases, + Function nodeIdToDiscoveryNode, + Map aliasFilter, SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, perIndexFilteringAliases, executor, + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, request, listener, shardsIts, startTime, clusterStateVersion); this.searchPhaseController = searchPhaseController; + } @Override diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java 
b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index edf651e1f2a..0bcae7502ee 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResultProvider; @@ -49,11 +50,12 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction nodeIdToDiscoveryNode, Map perIndexFilteringAliases, + Function nodeIdToDiscoveryNode, Map aliasFilter, SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, perIndexFilteringAliases, executor, request, listener, + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, request, listener, shardsIts, startTime, clusterStateVersion); this.searchPhaseController = searchPhaseController; fetchResults = new AtomicArray<>(firstResults.length()); diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 54105dc82dc..d8caa72f612 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -26,7 +26,6 @@ import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -38,6 +37,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -60,6 +60,7 @@ public class TransportSearchAction extends HandledTransportAction buildPerIndexAliasFilter(SearchRequest request, ClusterState clusterState, String...concreteIndices) { + final Map aliasFilterMap = new HashMap<>(); + for (String index : concreteIndices) { + clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index); + AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, index, request.indices()); + if (aliasFilter != null) { + aliasFilterMap.put(index, aliasFilter); + } + } + return aliasFilterMap; } @Override @@ -85,14 +99,7 @@ public class TransportSearchAction extends HandledTransportAction filteringAliasLookup = new HashMap<>(); - - for (String index : concreteIndices) { - clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index); - filteringAliasLookup.put(index, indexNameExpressionResolver.filteringAliases(clusterState, - index, searchRequest.indices())); - } - + Map aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, concreteIndices); Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), 
searchRequest.indices()); GroupShardsIterator shardIterators = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, @@ -122,12 +129,12 @@ public class TransportSearchAction extends HandledTransportAction filteringAliasLookup, + ClusterState state, Map aliasFilter, ActionListener listener) { final Function nodesLookup = state.nodes()::get; final long clusterStateVersion = state.version(); @@ -136,22 +143,22 @@ public class TransportSearchAction extends HandledTransportAction - * The list of filtering aliases should be obtained by calling MetaData.filteringAliases. - * Returns null if no filtering is required.

    - */ - public Query aliasFilter(QueryShardContext context, String... aliasNames) { - if (aliasNames == null || aliasNames.length == 0) { - return null; - } - final ImmutableOpenMap aliases = indexSettings.getIndexMetaData().getAliases(); - if (aliasNames.length == 1) { - AliasMetaData alias = aliases.get(aliasNames[0]); - if (alias == null) { - // This shouldn't happen unless alias disappeared after filteringAliases was called. - throw new InvalidAliasNameException(index(), aliasNames[0], "Unknown alias name was passed to alias Filter"); - } - return parse(alias, context); - } else { - // we need to bench here a bit, to see maybe it makes sense to use OrFilter - BooleanQuery.Builder combined = new BooleanQuery.Builder(); - for (String aliasName : aliasNames) { - AliasMetaData alias = aliases.get(aliasName); - if (alias == null) { - // This shouldn't happen unless alias disappeared after filteringAliases was called. - throw new InvalidAliasNameException(indexSettings.getIndex(), aliasNames[0], - "Unknown alias name was passed to alias Filter"); - } - Query parsedFilter = parse(alias, context); - if (parsedFilter != null) { - combined.add(parsedFilter, BooleanClause.Occur.SHOULD); - } else { - // The filter might be null only if filter was removed after filteringAliases was called - return null; - } - } - return combined.build(); - } - } - - private Query parse(AliasMetaData alias, QueryShardContext shardContext) { - if (alias.filter() == null) { - return null; - } - try { - byte[] filterSource = alias.filter().uncompressed(); - try (XContentParser parser = XContentFactory.xContent(filterSource).createParser(filterSource)) { - Optional innerQueryBuilder = shardContext.newParseContext(parser).parseInnerQueryBuilder(); - if (innerQueryBuilder.isPresent()) { - return shardContext.toFilter(innerQueryBuilder.get()).query(); - } - return null; - } - } catch (IOException ex) { - throw new AliasFilterParsingException(shardContext.index(), alias.getAlias(), "Invalid alias 
filter", ex); - } - } - public IndexMetaData getMetaData() { return indexSettings.getIndexMetaData(); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 202d303ce8b..4c7e541aafa 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -45,6 +45,7 @@ import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -65,6 +66,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; @@ -84,6 +86,7 @@ import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.merge.MergeStats; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; @@ -106,6 +109,7 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import 
org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QueryPhase; @@ -128,6 +132,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -1207,4 +1212,12 @@ public class IndicesService extends AbstractLifecycleComponent (Index index, IndexSettings indexSettings) -> canDeleteIndexContents(index, indexSettings); private final IndexDeletionAllowedPredicate ALWAYS_TRUE = (Index index, IndexSettings indexSettings) -> true; + public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) { + Function factory = + (parser) -> new QueryParseContext(indicesQueriesRegistry, parser, new ParseFieldMatcher(settings)); + String[] aliases = indexNameExpressionResolver.filteringAliases(state, index, expressions); + IndexMetaData indexMetaData = state.metaData().index(index); + return new AliasFilter(ShardSearchRequest.parseAliasFilter(factory, indexMetaData, aliases), aliases); + } + } diff --git a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 006341dc046..a1c140639c1 100644 --- a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.ParsedQuery; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import 
org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; @@ -74,6 +75,7 @@ import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -227,7 +229,12 @@ final class DefaultSearchContext extends SearchContext { } // initialize the filtering alias based on the provided filters - aliasFilter = indexService.aliasFilter(queryShardContext, request.filteringAliases()); + try { + final QueryBuilder queryBuilder = request.filteringAliases(); + aliasFilter = queryBuilder == null ? null : queryBuilder.toFilter(queryShardContext); + } catch (IOException e) { + throw new UncheckedIOException(e); + } if (query() == null) { parsedQuery(ParsedQuery.parsedMatchAllQuery()); diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 47949573dd3..c12d0ff5263 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -22,8 +22,10 @@ package org.elasticsearch.search; import com.carrotsearch.hppc.ObjectFloatHashMap; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -64,6 +66,7 @@ import org.elasticsearch.search.fetch.ShardFetchRequest; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField; import 
org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalScrollSearchRequest; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; @@ -262,7 +265,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv // execution exception can happen while loading the cache, strip it if (e instanceof ExecutionException) { e = (e.getCause() == null || e.getCause() instanceof Exception) ? - (Exception) e.getCause() : new ElasticsearchException(e.getCause()); + (Exception) e.getCause() : new ElasticsearchException(e.getCause()); } operationListener.onFailedQueryPhase(context); logger.trace("Query phase failed", e); @@ -449,7 +452,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } operationListener.onFetchPhase(context, System.nanoTime() - time2); return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()), - context.shardTarget()); + context.shardTarget()); } catch (Exception e) { logger.trace("Fetch phase failed", e); processFailure(context, e); @@ -518,11 +521,6 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException { final DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, searcher); try { - // we clone the query shard context here just for rewriting otherwise we - // might end up with incorrect state since we are using now() or script services - // during rewrite and normalized / evaluate templates etc. 
- request.rewrite(new QueryShardContext(context.getQueryShardContext())); - assert context.getQueryShardContext().isCachable(); if (request.scroll() != null) { context.scrollContext(new ScrollContext()); context.scrollContext().scroll = request.scroll(); @@ -556,16 +554,30 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv return context; } - public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) { + public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) + throws IOException { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.getShard(request.shardId().getId()); SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId()); Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher; - return new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, - indexService, - indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, - timeout, fetchPhase); + final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, + engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, + timeout, fetchPhase); + boolean success = false; + try { + // we clone the query shard context here just for rewriting otherwise we + // might end up with incorrect state since we are using now() or script services + // during rewrite and normalized / evaluate templates etc. 
+ request.rewrite(new QueryShardContext(searchContext.getQueryShardContext())); + assert searchContext.getQueryShardContext().isCachable(); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(searchContext); + } + } + return searchContext; } private void freeAllContextForIndex(Index index) { @@ -730,7 +742,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv if (source.scriptFields() != null) { for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) { SearchScript searchScript = scriptService.search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH, - Collections.emptyMap()); + Collections.emptyMap()); context.scriptFields().add(new ScriptField(field.fieldName(), searchScript, field.ignoreFailure())); } } @@ -853,10 +865,14 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } if ((time - lastAccessTime > context.keepAlive())) { logger.debug("freeing search context [{}], time [{}], lastAccessTime [{}], keepAlive [{}]", context.id(), time, - lastAccessTime, context.keepAlive()); + lastAccessTime, context.keepAlive()); freeContext(context.id()); } } } } + + public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) { + return indicesService.buildAliasFilter(state, index, expressions); + } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java b/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java new file mode 100644 index 00000000000..9d22729b7a0 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java @@ -0,0 +1,121 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.internal; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Represents a {@link QueryBuilder} and a list of alias names that filters the builder is composed of. + */ +public final class AliasFilter implements Writeable { + public static final Version V_5_1_0 = Version.fromId(5010099); + private final String[] aliases; + private final QueryBuilder filter; + private final boolean reparseAliases; + + public AliasFilter(QueryBuilder filter, String... aliases) { + this.aliases = aliases == null ? 
Strings.EMPTY_ARRAY : aliases; + this.filter = filter; + reparseAliases = false; // no bwc here - we only do this if we parse the filter + } + + public AliasFilter(StreamInput input) throws IOException { + aliases = input.readStringArray(); + if (input.getVersion().onOrAfter(V_5_1_0)) { + filter = input.readOptionalNamedWriteable(QueryBuilder.class); + reparseAliases = false; + } else { + reparseAliases = true; // alright we read from 5.0 + filter = null; + } + } + + private QueryBuilder reparseFilter(QueryRewriteContext context) { + if (reparseAliases) { + // we are processing a filter received from a 5.0 node - we need to reparse this on the executing node + final IndexMetaData indexMetaData = context.getIndexSettings().getIndexMetaData(); + return ShardSearchRequest.parseAliasFilter(context::newParseContext, indexMetaData, aliases); + } + return filter; + } + + AliasFilter rewrite(QueryRewriteContext context) throws IOException { + QueryBuilder queryBuilder = reparseFilter(context); + if (queryBuilder != null) { + return new AliasFilter(QueryBuilder.rewriteQuery(queryBuilder, context), aliases); + } + return new AliasFilter(filter, aliases); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(aliases); + if (out.getVersion().onOrAfter(V_5_1_0)) { + out.writeOptionalNamedWriteable(filter); + } + } + + /** + * Returns the alias patterns that are used to compose the {@link QueryBuilder} + * returned from {@link #getQueryBuilder()} + */ + public String[] getAliases() { + return aliases; + } + + /** + * Returns the alias filter {@link QueryBuilder} or null if there is no such filter + */ + public QueryBuilder getQueryBuilder() { + if (reparseAliases) { + // this is only for BWC since 5.0 still only sends aliases so this must be rewritten on the executing node + // if we talk to an older node we also only forward/write the string array which is compatible with the consumers + // in 5.0 see ExplainRequest and 
QueryValidationRequest + throw new IllegalStateException("alias filter for aliases: " + Arrays.toString(aliases) + " must be rewritten first"); + } + return filter; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AliasFilter that = (AliasFilter) o; + return reparseAliases == that.reparseAliases && + Arrays.equals(aliases, that.aliases) && + Objects.equals(filter, that.filter); + } + + @Override + public int hashCode() { + return Objects.hash(aliases, filter, reparseAliases); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 0d6148011ed..0fe10fa71cd 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; @@ -62,7 +63,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { private SearchType searchType; private Scroll scroll; private String[] types = Strings.EMPTY_ARRAY; - private String[] filteringAliases; + private AliasFilter aliasFilter; private SearchSourceBuilder source; private Boolean requestCache; private long nowInMillis; @@ -73,29 +74,29 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } ShardSearchLocalRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards, - String[] filteringAliases, long 
nowInMillis) { + AliasFilter aliasFilter, long nowInMillis) { this(shardRouting.shardId(), numberOfShards, searchRequest.searchType(), - searchRequest.source(), searchRequest.types(), searchRequest.requestCache()); + searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter); this.scroll = searchRequest.scroll(); - this.filteringAliases = filteringAliases; this.nowInMillis = nowInMillis; } - public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, String[] filteringAliases) { + public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, AliasFilter aliasFilter) { this.types = types; this.nowInMillis = nowInMillis; - this.filteringAliases = filteringAliases; + this.aliasFilter = aliasFilter; this.shardId = shardId; } public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, - Boolean requestCache) { + Boolean requestCache, AliasFilter aliasFilter) { this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; this.source = source; this.types = types; this.requestCache = requestCache; + this.aliasFilter = aliasFilter; } @@ -130,8 +131,8 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } @Override - public String[] filteringAliases() { - return filteringAliases; + public QueryBuilder filteringAliases() { + return aliasFilter.getQueryBuilder(); } @Override @@ -166,7 +167,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { scroll = in.readOptionalWriteable(Scroll::new); source = in.readOptionalWriteable(SearchSourceBuilder::new); types = in.readStringArray(); - filteringAliases = in.readStringArray(); + aliasFilter = new AliasFilter(in); nowInMillis = in.readVLong(); requestCache = in.readOptionalBoolean(); } @@ -180,7 +181,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { out.writeOptionalWriteable(scroll); 
out.writeOptionalWriteable(source); out.writeStringArray(types); - out.writeStringArrayNullable(filteringAliases); + aliasFilter.writeTo(out); if (!asKey) { out.writeVLong(nowInMillis); } @@ -200,6 +201,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { public void rewrite(QueryShardContext context) throws IOException { SearchSourceBuilder source = this.source; SearchSourceBuilder rewritten = null; + aliasFilter = aliasFilter.rewrite(context); while (rewritten != source) { rewritten = source.rewrite(context); source = rewritten; diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 6c237322f04..01852506cdc 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -20,13 +20,26 @@ package org.elasticsearch.search.internal; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.AliasFilterParsingException; +import org.elasticsearch.indices.InvalidAliasNameException; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; +import java.util.Optional; +import 
java.util.function.Function; /** * Shard level request that represents a search. @@ -47,7 +60,7 @@ public interface ShardSearchRequest { SearchType searchType(); - String[] filteringAliases(); + QueryBuilder filteringAliases(); long nowInMillis(); @@ -76,4 +89,64 @@ public interface ShardSearchRequest { * QueryBuilder. */ void rewrite(QueryShardContext context) throws IOException; + + /** + * Returns the filter associated with listed filtering aliases. + *

    + * The list of filtering aliases should be obtained by calling MetaData.filteringAliases. + * Returns null if no filtering is required.

    + */ + static QueryBuilder parseAliasFilter(Function contextFactory, + IndexMetaData metaData, String... aliasNames) { + if (aliasNames == null || aliasNames.length == 0) { + return null; + } + Index index = metaData.getIndex(); + ImmutableOpenMap aliases = metaData.getAliases(); + Function parserFunction = (alias) -> { + if (alias.filter() == null) { + return null; + } + try { + byte[] filterSource = alias.filter().uncompressed(); + try (XContentParser parser = XContentFactory.xContent(filterSource).createParser(filterSource)) { + Optional innerQueryBuilder = contextFactory.apply(parser).parseInnerQueryBuilder(); + if (innerQueryBuilder.isPresent()) { + return innerQueryBuilder.get(); + } + return null; + } + } catch (IOException ex) { + throw new AliasFilterParsingException(index, alias.getAlias(), "Invalid alias filter", ex); + } + }; + if (aliasNames.length == 1) { + AliasMetaData alias = aliases.get(aliasNames[0]); + if (alias == null) { + // This shouldn't happen unless alias disappeared after filteringAliases was called. + throw new InvalidAliasNameException(index, aliasNames[0], "Unknown alias name was passed to alias Filter"); + } + return parserFunction.apply(alias); + } else { + // we need to bench here a bit, to see maybe it makes sense to use OrFilter + BoolQueryBuilder combined = new BoolQueryBuilder(); + for (String aliasName : aliasNames) { + AliasMetaData alias = aliases.get(aliasName); + if (alias == null) { + // This shouldn't happen unless alias disappeared after filteringAliases was called. 
+ throw new InvalidAliasNameException(index, aliasNames[0], + "Unknown alias name was passed to alias Filter"); + } + QueryBuilder parsedFilter = parserFunction.apply(alias); + if (parsedFilter != null) { + combined.should(parsedFilter); + } else { + // The filter might be null only if filter was removed after filteringAliases was called + return null; + } + } + return combined; + } + } + } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 93013b94b36..1a92257dc34 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; @@ -51,8 +52,8 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha } public ShardSearchTransportRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards, - String[] filteringAliases, long nowInMillis) { - this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardRouting, numberOfShards, filteringAliases, nowInMillis); + AliasFilter aliasFilter, long nowInMillis) { + this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardRouting, numberOfShards, aliasFilter, nowInMillis); this.originalIndices = new OriginalIndices(searchRequest); } @@ -104,7 +105,7 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha } @Override - public 
String[] filteringAliases() { + public QueryBuilder filteringAliases() { return shardSearchLocalRequest.filteringAliases(); } diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index c3238011f67..8ba5dc4868f 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -19,13 +19,14 @@ package org.elasticsearch; +import org.elasticsearch.action.ShardValidateQueryRequestTests; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.hamcrest.Matchers; -import org.junit.Assert; import java.lang.reflect.Modifier; import java.util.HashMap; @@ -286,6 +287,9 @@ public class VersionTests extends ESTestCase { public void testUnknownVersions() { assertUnknownVersion(V_20_0_0_UNRELEASED); expectThrows(AssertionError.class, () -> assertUnknownVersion(Version.CURRENT)); + assertUnknownVersion(AliasFilter.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant + // once we released 5.0.0 and it's added to Version.java we need to remove this constant + assertUnknownVersion(ShardValidateQueryRequestTests.V_5_0_0); } public static void assertUnknownVersion(Version version) { diff --git a/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java index 5e7040a707a..ad2cabefdbc 100644 --- a/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.action; import org.elasticsearch.action.explain.ExplainRequest; +import 
org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -29,10 +30,12 @@ import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.SearchRequestParsers; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Base64; import java.util.Collections; import java.util.List; @@ -56,7 +59,7 @@ public class ExplainRequestTests extends ESTestCase { try (BytesStreamOutput output = new BytesStreamOutput()) { ExplainRequest request = new ExplainRequest("index", "type", "id"); request.fetchSourceContext(new FetchSourceContext(true, new String[]{"field1.*"}, new String[] {"field2.*"})); - request.filteringAlias(new String[] {"alias0", "alias1"}); + request.filteringAlias(new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"})); request.preference("the_preference"); request.query(QueryBuilders.termQuery("field", "value")); request.storedFields(new String[] {"field1", "field2"}); @@ -65,7 +68,7 @@ public class ExplainRequestTests extends ESTestCase { try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { ExplainRequest readRequest = new ExplainRequest(); readRequest.readFrom(in); - assertArrayEquals(request.filteringAlias(), readRequest.filteringAlias()); + assertEquals(request.filteringAlias(), readRequest.filteringAlias()); assertArrayEquals(request.storedFields(), readRequest.storedFields()); assertEquals(request.preference(), readRequest.preference()); assertEquals(request.query(), readRequest.query()); @@ -74,4 +77,36 @@ public class 
ExplainRequestTests extends ESTestCase { } } } + + // BWC test for changes from #20916 + public void testSerialize50Request() throws IOException { + ExplainRequest request = new ExplainRequest("index", "type", "id"); + request.fetchSourceContext(new FetchSourceContext(true, new String[]{"field1.*"}, new String[] {"field2.*"})); + request.filteringAlias(new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"})); + request.preference("the_preference"); + request.query(QueryBuilders.termQuery("field", "value")); + request.storedFields(new String[] {"field1", "field2"}); + request.routing("some_routing"); + BytesArray requestBytes = new BytesArray(Base64.getDecoder() + // this is a base64 encoded request generated with the same input + .decode("AAABBWluZGV4BHR5cGUCaWQBDHNvbWVfcm91dGluZwEOdGhlX3ByZWZlcmVuY2UEdGVybT" + + "+AAAAABWZpZWxkFQV2YWx1ZQIGYWxpYXMwBmFsaWFzMQECBmZpZWxkMQZmaWVsZDIBAQEIZmllbGQxLioBCGZpZWxkMi4qAA")); + try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { + in.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + ExplainRequest readRequest = new ExplainRequest(); + readRequest.readFrom(in); + assertEquals(0, in.available()); + assertArrayEquals(request.filteringAlias().getAliases(), readRequest.filteringAlias().getAliases()); + expectThrows(IllegalStateException.class, () -> readRequest.filteringAlias().getQueryBuilder()); + assertArrayEquals(request.storedFields(), readRequest.storedFields()); + assertEquals(request.preference(), readRequest.preference()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.routing(), readRequest.routing()); + assertEquals(request.fetchSourceContext(), readRequest.fetchSourceContext()); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + readRequest.writeTo(output); + assertEquals(output.bytes().toBytesRef(), 
requestBytes.toBytesRef()); + } + } } diff --git a/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java index 5d75722a13d..34c6999f4e8 100644 --- a/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java @@ -18,8 +18,10 @@ */ package org.elasticsearch.action; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.validate.query.ShardValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -30,14 +32,17 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.SearchRequestParsers; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Base64; import java.util.Collections; import java.util.List; public class ShardValidateQueryRequestTests extends ESTestCase { + public static final Version V_5_0_0 = Version.fromId(5000099); protected NamedWriteableRegistry namedWriteableRegistry; protected SearchRequestParsers searchRequestParsers; @@ -61,12 +66,12 @@ public class ShardValidateQueryRequestTests extends ESTestCase { validateQueryRequest.explain(false); validateQueryRequest.types("type1", "type2"); ShardValidateQueryRequest request = new ShardValidateQueryRequest(new ShardId("index", "foobar", 1), - new String[] {"alias0", "alias1"}, validateQueryRequest); + new 
AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest); request.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); readRequest.readFrom(in); - assertArrayEquals(request.filteringAliases(), readRequest.filteringAliases()); + assertEquals(request.filteringAliases(), readRequest.filteringAliases()); assertArrayEquals(request.types(), readRequest.types()); assertEquals(request.explain(), readRequest.explain()); assertEquals(request.query(), readRequest.query()); @@ -75,4 +80,35 @@ public class ShardValidateQueryRequestTests extends ESTestCase { } } } + + // BWC test for changes from #20916 + public void testSerialize50Request() throws IOException { + ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices"); + validateQueryRequest.query(QueryBuilders.termQuery("field", "value")); + validateQueryRequest.rewrite(true); + validateQueryRequest.explain(false); + validateQueryRequest.types("type1", "type2"); + ShardValidateQueryRequest request = new ShardValidateQueryRequest(new ShardId("index", "foobar", 1), + new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest); + BytesArray requestBytes = new BytesArray(Base64.getDecoder() + // this is a base64 encoded request generated with the same input + .decode("AAVpbmRleAZmb29iYXIBAQdpbmRpY2VzBAR0ZXJtP4AAAAAFZmllbGQVBXZhbHVlAgV0eXBlMQV0eXBlMgIGYWxpYXMwBmFsaWFzMQABAA")); + try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { + in.setVersion(V_5_0_0); + ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); + readRequest.readFrom(in); + assertEquals(0, in.available()); + assertArrayEquals(request.filteringAliases().getAliases(), 
readRequest.filteringAliases().getAliases()); + expectThrows(IllegalStateException.class, () -> readRequest.filteringAliases().getQueryBuilder()); + assertArrayEquals(request.types(), readRequest.types()); + assertEquals(request.explain(), readRequest.explain()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.rewrite(), readRequest.rewrite()); + assertEquals(request.shardId(), readRequest.shardId()); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(V_5_0_0); + readRequest.writeTo(output); + assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java index afde263d73d..7ba78afb8c6 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -77,82 +77,6 @@ public class IndexServiceTests extends ESSingleNodeTestCase { return new CompressedXContent(builder.string()); } - public void testFilteringAliases() throws Exception { - IndexService indexService = createIndex("test", Settings.EMPTY); - add(indexService, "cats", filter(termQuery("animal", "cat"))); - add(indexService, "dogs", filter(termQuery("animal", "dog"))); - add(indexService, "all", null); - - assertThat(indexService.getMetaData().getAliases().containsKey("cats"), equalTo(true)); - assertThat(indexService.getMetaData().getAliases().containsKey("dogs"), equalTo(true)); - assertThat(indexService.getMetaData().getAliases().containsKey("turtles"), equalTo(false)); - - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats").toString(), equalTo("animal:cat")); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats", "dogs").toString(), equalTo("animal:cat animal:dog")); - - // Non-filtering alias should turn off all filters because filters are ORed - 
assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "all"), nullValue()); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats", "all"), nullValue()); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "all", "cats"), nullValue()); - - add(indexService, "cats", filter(termQuery("animal", "feline"))); - add(indexService, "dogs", filter(termQuery("animal", "canine"))); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:canine animal:feline")); - } - - public void testAliasFilters() throws Exception { - IndexService indexService = createIndex("test", Settings.EMPTY); - - add(indexService, "cats", filter(termQuery("animal", "cat"))); - add(indexService, "dogs", filter(termQuery("animal", "dog"))); - - assertThat(indexService.aliasFilter(indexService.newQueryShardContext()), nullValue()); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs").toString(), equalTo("animal:dog")); - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:dog animal:cat")); - - add(indexService, "cats", filter(termQuery("animal", "feline"))); - add(indexService, "dogs", filter(termQuery("animal", "canine"))); - - assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:canine animal:feline")); - } - - public void testRemovedAliasFilter() throws Exception { - IndexService indexService = createIndex("test", Settings.EMPTY); - - add(indexService, "cats", filter(termQuery("animal", "cat"))); - remove(indexService, "cats"); - try { - indexService.aliasFilter(indexService.newQueryShardContext(), "cats"); - fail("Expected InvalidAliasNameException"); - } catch (InvalidAliasNameException e) { - assertThat(e.getMessage(), containsString("Invalid alias name [cats]")); - } - } - - public void testUnknownAliasFilter() 
throws Exception { - IndexService indexService = createIndex("test", Settings.EMPTY); - - add(indexService, "cats", filter(termQuery("animal", "cat"))); - add(indexService, "dogs", filter(termQuery("animal", "dog"))); - - try { - indexService.aliasFilter(indexService.newQueryShardContext(), "unknown"); - fail(); - } catch (InvalidAliasNameException e) { - // all is well - } - } - - private void remove(IndexService service, String alias) { - IndexMetaData build = IndexMetaData.builder(service.getMetaData()).removeAlias(alias).build(); - service.updateMetaData(build); - } - - private void add(IndexService service, String alias, @Nullable CompressedXContent filter) { - IndexMetaData build = IndexMetaData.builder(service.getMetaData()).putAlias(AliasMetaData.builder(alias).filter(filter).build()).build(); - service.updateMetaData(build); - } - public void testBaseAsyncTask() throws InterruptedException, IOException { IndexService indexService = createIndex("test", Settings.EMPTY); AtomicReference latch = new AtomicReference<>(new CountDownLatch(1)); diff --git a/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index 2a961d58928..76d3bfbc484 100644 --- a/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.ScriptService; @@ -81,8 +82,8 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase { } @Override - public String[] filteringAliases() { - return new String[0]; + public QueryBuilder 
filteringAliases() { + return null; } @Override diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 07dd5d016c1..0803a788e8a 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -478,22 +478,21 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), equalTo(1L)); - // filtered alias is handled differently and must not be cached at this point r1 = client().prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); assertSearchResponse(r1); assertThat(r1.getHits().getTotalHits(), equalTo(1L)); assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), equalTo(1L)); assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(1L)); + equalTo(2L)); r1 = client().prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); assertSearchResponse(r1); assertThat(r1.getHits().getTotalHits(), equalTo(1L)); assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), - equalTo(1L)); + equalTo(2L)); assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(1L)); + equalTo(2L)); } } diff --git a/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java index e98b469f955..c29822990f0 100644 --- a/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java +++ 
b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.script.Script; @@ -68,6 +69,7 @@ public abstract class AbstractSearchTestCase extends ESTestCase { protected NamedWriteableRegistry namedWriteableRegistry; protected SearchRequestParsers searchRequestParsers; private TestSearchExtPlugin searchExtPlugin; + protected IndicesQueriesRegistry queriesRegistry; public void setUp() throws Exception { super.setUp(); @@ -79,6 +81,7 @@ public abstract class AbstractSearchTestCase extends ESTestCase { entries.addAll(searchModule.getNamedWriteables()); namedWriteableRegistry = new NamedWriteableRegistry(entries); searchRequestParsers = searchModule.getSearchRequestParsers(); + queriesRegistry = searchModule.getQueryParserRegistry(); } protected SearchSourceBuilder createSearchSourceBuilder() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 7de8f6a4988..0def6726e03 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.xcontent.XContentBuilder; @@ -41,6 +42,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.ShardFetchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -173,7 +175,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase { try { QuerySearchResultProvider querySearchResultProvider = service.executeQueryPhase( new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - new SearchSourceBuilder(), new String[0], false)); + new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY))); IntArrayList intCursors = new IntArrayList(1); intCursors.add(0); ShardFetchRequest req = new ShardFetchRequest(querySearchResultProvider.id(), intCursors, null /* not a scroll */); diff --git a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index 452b6b6ba3a..8c501d71e0a 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -19,21 +19,51 @@ package org.elasticsearch.search.internal; +import org.elasticsearch.Version; +import org.elasticsearch.action.ShardValidateQueryRequestTests; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import 
org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.RandomQueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.InvalidAliasNameException; import org.elasticsearch.search.AbstractSearchTestCase; import java.io.IOException; +import java.util.Base64; +import java.util.function.Function; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { + private IndexMetaData baseMetaData = IndexMetaData.builder("test").settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()) + .numberOfShards(1).numberOfReplicas(1).build(); public void testSerialization() throws Exception { 
ShardSearchTransportRequest shardSearchTransportRequest = createShardSearchTransportRequest(); @@ -43,7 +73,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { ShardSearchTransportRequest deserializedRequest = new ShardSearchTransportRequest(); deserializedRequest.readFrom(in); assertEquals(deserializedRequest.scroll(), shardSearchTransportRequest.scroll()); - assertArrayEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases()); + assertEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases()); assertArrayEquals(deserializedRequest.indices(), shardSearchTransportRequest.indices()); assertArrayEquals(deserializedRequest.types(), shardSearchTransportRequest.types()); assertEquals(deserializedRequest.indicesOptions(), shardSearchTransportRequest.indicesOptions()); @@ -55,6 +85,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { assertEquals(deserializedRequest.numberOfShards(), shardSearchTransportRequest.numberOfShards()); assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey()); assertNotSame(deserializedRequest, shardSearchTransportRequest); + assertEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases()); } } } @@ -64,13 +95,129 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { ShardId shardId = new ShardId(randomAsciiOfLengthBetween(2, 10), randomAsciiOfLengthBetween(2, 10), randomInt()); ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "reason")); - String[] filteringAliases; + final AliasFilter filteringAliases; if (randomBoolean()) { - filteringAliases = generateRandomStringArray(10, 10, false, false); + String[] strings = generateRandomStringArray(10, 10, false, false); + 
filteringAliases = new AliasFilter(RandomQueryBuilder.createQuery(random()), strings); } else { - filteringAliases = Strings.EMPTY_ARRAY; + filteringAliases = new AliasFilter(null, Strings.EMPTY_ARRAY); } return new ShardSearchTransportRequest(searchRequest, shardRouting, randomIntBetween(1, 100), filteringAliases, Math.abs(randomLong())); } + + public void testFilteringAliases() throws Exception { + IndexMetaData indexMetaData = baseMetaData; + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat"))); + indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "dog"))); + indexMetaData = add(indexMetaData, "all", null); + + assertThat(indexMetaData.getAliases().containsKey("cats"), equalTo(true)); + assertThat(indexMetaData.getAliases().containsKey("dogs"), equalTo(true)); + assertThat(indexMetaData.getAliases().containsKey("turtles"), equalTo(false)); + + assertEquals(aliasFilter(indexMetaData, "cats"), QueryBuilders.termQuery("animal", "cat")); + assertEquals(aliasFilter(indexMetaData, "cats", "dogs"), QueryBuilders.boolQuery().should(QueryBuilders.termQuery("animal", "cat")) + .should(QueryBuilders.termQuery("animal", "dog"))); + + // Non-filtering alias should turn off all filters because filters are ORed + assertThat(aliasFilter(indexMetaData,"all"), nullValue()); + assertThat(aliasFilter(indexMetaData, "cats", "all"), nullValue()); + assertThat(aliasFilter(indexMetaData, "all", "cats"), nullValue()); + + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "feline"))); + indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "canine"))); + assertEquals(aliasFilter(indexMetaData, "dogs", "cats"),QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("animal", "canine")) + .should(QueryBuilders.termQuery("animal", "feline"))); + } + + public void testRemovedAliasFilter() throws Exception { + IndexMetaData indexMetaData = baseMetaData; + indexMetaData = add(indexMetaData, "cats", 
filter(termQuery("animal", "cat"))); + indexMetaData = remove(indexMetaData, "cats"); + try { + aliasFilter(indexMetaData, "cats"); + fail("Expected InvalidAliasNameException"); + } catch (InvalidAliasNameException e) { + assertThat(e.getMessage(), containsString("Invalid alias name [cats]")); + } + } + + public void testUnknownAliasFilter() throws Exception { + IndexMetaData indexMetaData = baseMetaData; + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat"))); + indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "dog"))); + IndexMetaData finalIndexMetadata = indexMetaData; + expectThrows(InvalidAliasNameException.class, () -> aliasFilter(finalIndexMetadata, "unknown")); + } + + public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.close(); + return new CompressedXContent(builder.string()); + } + + private IndexMetaData remove(IndexMetaData indexMetaData, String alias) { + IndexMetaData build = IndexMetaData.builder(indexMetaData).removeAlias(alias).build(); + return build; + } + + private IndexMetaData add(IndexMetaData indexMetaData, String alias, @Nullable CompressedXContent filter) { + return IndexMetaData.builder(indexMetaData).putAlias(AliasMetaData.builder(alias).filter(filter).build()).build(); + } + + public QueryBuilder aliasFilter(IndexMetaData indexMetaData, String... 
aliasNames) { + Function contextFactory = (p) -> new QueryParseContext(queriesRegistry, + p, new ParseFieldMatcher(Settings.EMPTY)); + return ShardSearchRequest.parseAliasFilter(contextFactory, indexMetaData, aliasNames); + } + + // BWC test for changes from #20916 + public void testSerialize50Request() throws IOException { + BytesArray requestBytes = new BytesArray(Base64.getDecoder() + // this is a base64 encoded request generated with the same input + .decode("AAh4cXptdEhJcgdnT0d1ZldWyfL/sgQBJAHkDAMBAAIBAQ4TWlljWlZ5TkVmRU5xQnFQVHBjVBRZbUpod2pRV2dDSXVxRXpRaEdGVBRFZWFJY0plT2hn" + + "UEpISFhmSXR6Qw5XZ1hQcmFidWhWalFSQghuUWNwZ2JjQxBtZldRREJPaGF3UnlQSE56EVhQSUtRa25Iekh3bU5kbGVECWlFT2NIeEh3RgZIYXpMTWgUeGJq" + + "VU9Tdkdua3RORU5QZkNrb1EOalRyWGh5WXhvZ3plV2UUcWlXZFl2eUFUSXdPVGdMUUtYTHAJU3RKR3JxQkVJEkdEQ01xUHpnWWNaT3N3U3prSRIUeURlVFpM" + + "Q1lBZERZcWpDb3NOVWIST1NyQlZtdUNrd0F1UXRvdVRjEGp6RlVMd1dqc3VtUVNaTk0JT3N2cnpLQ3ZLBmRpS1J6cgdYbmVhZnBxBUlTUU9pEEJMcm1ERXVs" + + "eXhESlBoVkgTaWdUUmtVZGh4d0FFc2ZKRm9ZahNrb01XTnFFd2NWSVVDU3pWS2xBC3JVTWV3V2tUUWJUE3VGQU1Hd21CYUFMTmNQZkxobXUIZ3dxWHBxWXcF" + + "bmNDZUEOTFBSTEpYZVF6Z3d2eE0PV1BucUFacll6WWRxa1hCDGxkbXNMaVRzcUZXbAtSY0NsY3FNdlJQcv8BAP////8PAQAAARQAAQp5THlIcHdQeGtMAAAB" + + "AQAAAAEDbkVLAQMBCgACAAADAQABAAAAAQhIc25wRGxQbwEBQgABAAACAQMAAAEIAAAJMF9OSG9kSmh2HwABAwljRW5MVWxFbVQFemlxWG8KcXZQTkRUUGJk" + + "bgECCkpMbXVMT1dtVnkISEdUUHhsd0cBAAEJAAABA2lkcz+rKsUAAAAAAAAAAAECAQYAAgwxX0ZlRWxSQkhzQ07/////DwABAAEDCnRyYXFHR1hjVHkKTERY" + + "aE1HRWVySghuSWtzbEtXUwABCgEHSlRwQnhwdwAAAQECAgAAAAAAAQcyX3FlYmNDGQEEBklxZU9iUQdTc01Gek5YCWlMd2xuamNRQwNiVncAAUHt61kAAQR0" + + "ZXJtP4AAAAANbUtDSnpHU3lidm5KUBUMaVpqeG9vcm5QSFlvAAEBLGdtcWxuRWpWTXdvTlhMSHh0RWlFdHBnbEF1cUNmVmhoUVlwRFZxVllnWWV1A2ZvbwEA" + + "AQhwYWlubGVzc/8AALk4AAAAAAABAAAAAAAAAwpKU09PU0ZmWnhFClVqTGxMa2p3V2gKdUJwZ3R3dXFER5Hg97uT7MOmPgEADw")); + try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { + 
in.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + ShardSearchTransportRequest readRequest = new ShardSearchTransportRequest(); + readRequest.readFrom(in); + assertEquals(0, in.available()); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> readRequest.filteringAliases()); + assertEquals("alias filter for aliases: [JSOOSFfZxE, UjLlLkjwWh, uBpgtwuqDG] must be rewritten first", + illegalStateException.getMessage()); + IndexMetaData.Builder indexMetadata = new IndexMetaData.Builder(baseMetaData) + .putAlias(AliasMetaData.newAliasMetaDataBuilder("JSOOSFfZxE").filter("{\"term\" : {\"foo\" : \"bar\"}}")) + .putAlias(AliasMetaData.newAliasMetaDataBuilder("UjLlLkjwWh").filter("{\"term\" : {\"foo\" : \"bar1\"}}")) + .putAlias(AliasMetaData.newAliasMetaDataBuilder("uBpgtwuqDG").filter("{\"term\" : {\"foo\" : \"bar2\"}}")); + IndexSettings indexSettings = new IndexSettings(indexMetadata.build(), Settings.EMPTY); + final long nowInMillis = randomPositiveLong(); + QueryShardContext context = new QueryShardContext( + 0, indexSettings, null, null, null, null, null, queriesRegistry, null, null, null, + () -> nowInMillis); + readRequest.rewrite(context); + QueryBuilder queryBuilder = readRequest.filteringAliases(); + assertEquals(queryBuilder, QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("foo", "bar")) + .should(QueryBuilders.termQuery("foo", "bar1")) + .should(QueryBuilders.termQuery("foo", "bar2")) + ); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + readRequest.writeTo(output); + assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); + } + } + } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml index 1923377ba83..4ea921a3fa0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml @@ -4,7 +4,9 @@ setup: index: test_1 body: aliases: - alias_1: {} + alias_1: { + "filter" : { "term" : { "foo" : "bar"} } + } - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml index a1f9aa87636..637ebd4253e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml @@ -5,12 +5,24 @@ setup: body: settings: number_of_replicas: 0 + aliases: + alias_1: { + "filter" : { "match_all" : {} } + } --- "Validate query api": - do: indices.validate_query: q: query string + index: testing + + - is_true: valid + + - do: + indices.validate_query: + q: query string + index: alias_1 - is_true: valid From 370253f95a7910db0e2dd4b86d6240af688d6b46 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 14 Oct 2016 05:48:41 -0400 Subject: [PATCH 53/53] Add doc note regarding processors bound This commit expands the thread pool docs regarding the processor setting. Relates #20895 --- docs/reference/modules/threadpool.asciidoc | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index a333312e0fb..4e8b5c61efd 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -118,14 +118,30 @@ thread_pool: [[processors]] === Processors setting The number of processors is automatically detected, and the thread pool -settings are automatically set based on it. Sometimes, the number of processors -are wrongly detected, in such cases, the number of processors can be -explicitly set using the `processors` setting. 
+settings are automatically set based on it. In some cases it can be +useful to override the number of detected processors. This can be done +by explicitly setting the `processors` setting. [source,yaml] -------------------------------------------------- processors: 2 -------------------------------------------------- +There are a few use-cases for explicitly overriding the `processors` +setting: + +. If you are running multiple instances of Elasticsearch on the same +host but want Elasticsearch to size its thread pools as if it only has a +fraction of the CPU, you should override the `processors` setting to the +desired fraction (e.g., if you're running two instances of Elasticsearch +on a 16-core machine, set `processors` to 8). Note that this is an +expert-level use-case and there's a lot more involved than just setting +the `processors` setting as there are other considerations like changing +the number of garbage collector threads, pinning processes to cores, +etc. +. Sometimes the number of processors is wrongly detected and in such +cases explicitly setting the `processors` setting will workaround such +issues. + In order to check the number of processors detected, use the nodes info API with the `os` flag.